diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..7ae3f2b
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,22 @@
+.git
+__pycache__
+*.pyc
+*.pyo
+*.pyd
+.pytest_cache
+.venv
+venv
+env
+node_modules
+dist
+build
+*.egg-info
+docker
+docker-compose.yml
+.gemini
+.github
+docs
+scratch
+testall
+testremote
+automation-template.yaml
diff --git a/.gitignore b/.gitignore
index dc4eee6..5456202 100644
--- a/.gitignore
+++ b/.gitignore
@@ -164,3 +164,7 @@ connpy_roadmap.md
MULTI_USER_PLAN.md
COPILOT_PLAN.md
ARCHITECTURAL_DEBT_REFACTOR.md
+
+#themes
+nord.yml
+theme.py
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..4a319b4
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,8 @@
+include LICENSE
+include README.md
+include requirements.txt
+recursive-include connpy/core_plugins *
+recursive-include connpy/proto *
+recursive-include connpy/grpc *.proto
+recursive-exclude * __pycache__
+recursive-exclude * *.py[co]
diff --git a/README.md b/README.md
index d5c32af..3439f89 100644
--- a/README.md
+++ b/README.md
@@ -9,519 +9,182 @@
[](https://github.com/fluzzi/connpy/blob/main/LICENSE)
[](https://pypi.org/pypi/connpy/)
-Connpy is a SSH, SFTP, Telnet, kubectl, Docker pod, and AWS SSM connection manager and automation module for Linux, Mac, and Docker.
+**Connpy** is a powerful Connection Manager and Network Automation Platform for Linux, Mac, and Docker. It provides a unified interface for **SSH, SFTP, Telnet, kubectl, Docker pods, and AWS SSM**.
+
+The v6 release introduces the **AI Copilot**, an interactive terminal assistant that understands your network context and helps you manage your infrastructure more intelligently.
+
+
+## 🤖 AI Copilot (New in v6)
+The AI Copilot is deeply integrated into your terminal workflow:
+- **Terminal Context Awareness**: The Copilot can "see" your screen output, helping you diagnose errors or analyze command results in real-time.
+- **Hybrid Multi-Agent System**: Automatically escalates complex tasks between the **Network Engineer** (execution) and the **Network Architect** (strategy).
+- **MCP Integration**: Dynamically load tools from external providers (6WIND, AWS, etc.) via the Model Context Protocol.
+- **Interactive Chat**: Launch with `conn ai` for a collaborative troubleshooting session.
+
+
+## Core Features
+- **Multi-Protocol**: Native support for SSH, SFTP, Telnet, kubectl, Docker exec, and AWS SSM.
+- **Context Management**: Set regex-based contexts to manage specific nodes across different environments (work, home, clients).
+- **Advanced Inventory**:
+ - Organize nodes in folders (`@folder`) and subfolders (`@subfolder@folder`).
+ - Use Global Profiles (`@profilename`) to manage shared credentials easily.
+ - Bulk creation, copying, moving, and export/import of nodes.
+- **Modern UI**: High-performance terminal experience with `prompt-toolkit`, including:
+ - Fuzzy search integration with `fzf`.
+ - Advanced tab completion.
+ - Syntax highlighting and customizable themes.
+- **Automation Engine**: Run parallel tasks and playbooks on multiple devices with variable support.
+- **Plugin System**: Build and execute custom Python scripts locally or on a remote gRPC server.
+- **gRPC Architecture**: Fully decoupled Client/Server model for distributed management.
+- **Privacy & Sync**: Local-first encrypted storage (RSA/OAEP) with optional Google Drive backup.
## Installation
+```bash
pip install connpy
-
-### Run it in Windows using docker
```
+
+### Run it in Windows/Linux using Docker
+```bash
git clone https://github.com/fluzzi/connpy
-docker compose -f path/to/folder/docker-compose.yml build
-docker compose -f path/to/folder/docker-compose.yml run -it connpy-app
+cd connpy
+docker compose build
+
+# Run it like a native app (completely silent)
+docker compose --log-level ERROR run --rm --remove-orphans connpy-app [command]
+
+# Pro Tip: Add this alias for a 100% native experience from any folder
+alias conn='docker compose -f /path/to/connpy/docker-compose.yml --log-level ERROR run --rm --remove-orphans connpy-app'
```
-## Connection manager
+---
+
+## 🔒 Privacy & Integration
+
### Privacy Policy
-
-Connpy is committed to protecting your privacy. Our privacy policy explains how we handle user data:
-
-- **Data Access**: Connpy accesses data necessary for managing remote host connections, including server addresses, usernames, and passwords. This data is stored locally on your machine and is not transmitted or shared with any third parties.
-- **Data Usage**: User data is used solely for the purpose of managing and automating SSH, Telnet, and SSM connections.
-- **Data Storage**: All connection details are stored locally and securely on your device. We do not store or process this data on our servers.
-- **Data Sharing**: We do not share any user data with third parties.
+Connpy is committed to protecting your privacy:
+- **Local Storage**: All server addresses, usernames, and passwords are encrypted and stored **only** on your machine. No data is transmitted to our servers.
+- **Data Access**: Data is used solely for managing and automating your connections.
### Google Integration
+Used strictly for backup:
+- **Backup**: Sync your encrypted configuration with your Google Drive account.
+- **Scoped Access**: Connpy only accesses its own backup files.
-Connpy integrates with Google services for backup purposes:
+---
-- **Configuration Backup**: The app allows users to store their device information in the app configuration. This configuration can be synced with Google services to create backups.
-- **Data Access**: Connpy only accesses its own files and does not access any other files on your Google account.
-- **Data Usage**: The data is used solely for backup and restore purposes, ensuring that your device information and configurations are safe and recoverable.
-- **Data Sharing**: Connpy does not share any user data with third parties, including Google. The backup data is only accessible by the user.
+## Usage
-For more detailed information, please read our [Privacy Policy](https://connpy.gederico.dynu.net/fluzzi32/connpy/src/branch/main/PRIVATE_POLICY.md).
-
-
-### Features
- - Manage connections using SSH, SFTP, Telnet, kubectl, Docker exec, and AWS SSM.
- - Set contexts to manage specific nodes from specific contexts (work/home/clients/etc).
- - You can generate profiles and reference them from nodes using @profilename so you don't
- need to edit multiple nodes when changing passwords or other information.
- - Nodes can be stored on @folder or @subfolder@folder to organize your devices. They can
- be referenced using node@subfolder@folder or node@folder.
- - If you have too many nodes, get a completion script using: conn config --completion.
- Or use fzf by installing pyfzf and running conn config --fzf true.
- - Create in bulk, copy, move, export, and import nodes for easy management.
- - Run automation scripts on network devices.
- - Use AI with a multi-agent system (Engineer/Architect) to manage devices.
- Supports any LLM provider via litellm (OpenAI, Anthropic, Google, etc.).
- Features streaming responses, interactive chat, and extensible plugin tools.
- - Add plugins with your own scripts, and execute them remotely.
- - Fully decoupled gRPC Client/Server architecture.
- - Unified UI with syntax highlighting and theming.
- - Much more!
-
-### Usage:
-```
+```bash
usage: conn [-h] [--add | --del | --mod | --show | --debug] [node|folder] [--sftp]
- conn {profile,move,mv,copy,cp,list,ls,bulk,export,import,ai,run,api,plugin,config,sync,context} ...
-
-positional arguments:
- node|folder node[@subfolder][@folder]
- Connect to specific node or show all matching nodes
- [@subfolder][@folder]
- Show all available connections globally or in specified path
-
-options:
- -h, --help show this help message and exit
- -v, --version Show version
- -a, --add Add new node[@subfolder][@folder] or [@subfolder]@folder
- -r, --del, --rm Delete node[@subfolder][@folder] or [@subfolder]@folder
- -e, --mod, --edit Modify node[@subfolder][@folder]
- -s, --show Show node[@subfolder][@folder]
- -d, --debug Display all conections steps
- -t, --sftp Connects using sftp instead of ssh
- --service-mode Set the backend service mode (local or remote)
- --remote Connect to a remote connpy service via gRPC
- --theme UI Output theme (dark, light, or path)
-
-Commands:
- profile Manage profiles
- move(mv) Move node
- copy(cp) Copy node
- list(ls) List profiles, nodes or folders
- bulk Add nodes in bulk
- export Export connection folder to Yaml file
- import Import connection folder to config from Yaml file
- ai Make request to an AI
- run Run scripts or commands on nodes
- api Start and stop connpy api
- plugin Manage plugins
- config Manage app config
- sync Sync config with Google
- context Manage contexts with regex matching
+ conn {profile,move,copy,list,bulk,export,import,ai,run,api,plugin,config,sync,context} ...
```
-### Manage profiles:
-```
-usage: conn profile [-h] (--add | --del | --mod | --show) profile
+### Basic Examples:
+```bash
+# Add a folder and subfolder
+conn --add @office
+conn --add @datacenter@office
-positional arguments:
- profile Name of profile to manage
+# Add a node with a profile
+conn --add server1@datacenter@office --profile @myuser
-options:
- -h, --help show this help message and exit
- -a, --add Add new profile
- -r, --del, --rm Delete profile
- -e, --mod, --edit Modify profile
- -s, --show Show profile
+# Connect to a node (fuzzy match)
+conn server1
+# Start the AI Copilot
+conn ai
+
+# Run a command on all nodes in a folder
+conn run @office "uptime"
```
-### Examples:
-```
- #Add new profile
- conn profile --add office-user
- #Add new folder
- conn --add @office
- #Add new subfolder
- conn --add @datacenter@office
- #Add node to subfolder
- conn --add server@datacenter@office
- #Add node to folder
- conn --add pc@office
- #Show node information
- conn --show server@datacenter@office
- #Connect to nodes
- conn pc@office
- conn server
- #Create and set new context
- conn context -a office .*@office
- conn context --set office
- #Run a command in a node
- conn run server ls -la
-```
+---
+
+## 🔌 Plugin System
+Connpy supports a robust plugin architecture where scripts can run transparently on a remote gRPC server.
+
+### Structure
+Plugins must be Python files containing:
+- **Class `Parser`**: Defines `argparse` arguments.
+- **Class `Entrypoint`**: Execution logic.
+- **Class `Preload`**: (Optional) Hooks and modifications to the core app.
+
+See the [Plugin Requirements section](#plugin-requirements-for-connpy) for full technical details.
+
+---
+
## Plugin Requirements for Connpy
### Remote Plugin Execution
When Connpy operates in remote mode, plugins are executed **transparently on the server**:
- The client automatically downloads the plugin source code (`Parser` class context) to generate the local `argparse` structure and provide autocompletion.
-- The execution phase (`Entrypoint` class) is redirected via gRPC streams to execute in the server's memory, ensuring the plugin runs securely against the server's inventory without passing sensitive data to the client.
-- You can manage remote plugins using the `--remote` flag (e.g. `connpy plugin --add myplugin script.py --remote`).
+- The execution phase (`Entrypoint` class) is redirected via gRPC streams to execute in the server's memory.
+- You can manage remote plugins using the `--remote` flag.
### General Structure
-- The plugin script must be a Python file.
-- Only the following top-level elements are allowed in the plugin script:
- - Class definitions
- - Function definitions
- - Import statements
- - The `if __name__ == "__main__":` block for standalone execution
- - Pass statements
-
-### Specific Class Requirements
-- The plugin script must define specific classes with particular attributes and methods. Each class serves a distinct role within the plugin's architecture:
- 1. **Class `Parser`**:
- - **Purpose**: Handles parsing of command-line arguments.
- - **Requirements**:
- - Must contain only one method: `__init__`.
- - The `__init__` method must initialize at least one attribute:
- - `self.parser`: An instance of `argparse.ArgumentParser`.
- 2. **Class `Entrypoint`**:
- - **Purpose**: Acts as the entry point for plugin execution, utilizing parsed arguments and integrating with the main application.
- - **Requirements**:
- - Must have an `__init__` method that accepts exactly three parameters besides `self`:
- - `args`: Arguments passed to the plugin.
- - The parser instance (typically `self.parser` from the `Parser` class).
- - The Connapp instance to interact with the Connpy app.
- 3. **Class `Preload`**:
- - **Purpose**: Performs any necessary preliminary setup or configuration independent of the main parsing and entry logic.
- - **Requirements**:
- - Contains at least an `__init__` method that accepts parameter connapp besides `self`.
-
-### Class Dependencies and Combinations
-- **Dependencies**:
- - `Parser` and `Entrypoint` are interdependent and must both be present if one is included.
- - `Preload` is independent and may exist alone or alongside the other classes.
-- **Valid Combinations**:
- - `Parser` and `Entrypoint` together.
- - `Preload` alone.
- - All three classes (`Parser`, `Entrypoint`, `Preload`).
+- The plugin script must define specific classes:
+ 1. **Class `Parser`**: Handles `argparse.ArgumentParser` initialization.
+ 2. **Class `Entrypoint`**: Main execution logic (receives `args`, `parser`, and `connapp`).
+ 3. **Class `Preload`**: (Optional) For modifying core app behavior or registering hooks.
### Preload Modifications and Hooks
-
-In the `Preload` class of the plugin system, you have the ability to customize the behavior of existing classes and methods within the application through a robust hooking system. This documentation explains how to use the `modify`, `register_pre_hook`, and `register_post_hook` methods to tailor plugin functionality to your needs.
-
-#### Modifying Classes with `modify`
-The `modify` method allows you to alter instances of a class at the time they are created or after their creation. This is particularly useful for setting or modifying configuration settings, altering default behaviors, or adding new functionalities to existing classes without changing the original class definitions.
-
-- **Usage**: Modify a class to include additional configurations or changes
-- **Modify Method Signature**:
- - `modify(modification_method)`: A function that is invoked with an instance of the class as its argument. This function should perform any modifications directly on this instance.
-- **Modification Method Signature**:
- - **Arguments**:
- - `cls`: This function accepts a single argument, the class instance, which it then modifies.
- - **Modifiable Classes**:
- - `connapp.config`
- - `connapp.node`
- - `connapp.nodes`
- - `connapp.ai`
- - ```python
- def modify_config(cls):
- # Example modification: adding a new attribute or modifying an existing one
- cls.new_attribute = 'New Value'
-
- class Preload:
- def __init__(self, connapp):
- # Applying modification to the config class instance
- connapp.config.modify(modify_config)
- ```
-
-#### Implementing Method Hooks
-There are 2 methods that allows you to define custom logic to be executed before (`register_pre_hook`) or after (`register_post_hook`) the main logic of a method. This is particularly useful for logging, auditing, preprocessing inputs, postprocessing outputs or adding functionalities.
-
- - **Usage**: Register hooks to methods to execute additional logic before or after the main method execution.
-- **Registration Methods Signature**:
- - `register_pre_hook(pre_hook_method)`: A function that is invoked before the main method is executed. This function should do preprocessing of the arguments.
- - `register_post_hook(post_hook_method)`: A function that is invoked after the main method is executed. This function should do postprocessing of the outputs.
-- **Method Signatures for Pre-Hooks**
- - `pre_hook_method(*args, **kwargs)`
- - **Arguments**:
- - `*args`, `**kwargs`: The arguments and keyword arguments that will be passed to the method being hooked. The pre-hook function has the opportunity to inspect and modify these arguments before they are passed to the main method.
- - **Return**:
- - Must return a tuple `(args, kwargs)`, which will be used as the new arguments for the main method. If the original arguments are not modified, the function should return them as received.
-- **Method Signatures for Post-Hooks**:
- - `post_hook_method(*args, **kwargs)`
- - **Arguments**:
- - `*args`, `**kwargs`: The arguments and keyword arguments that were passed to the main method.
- - `kwargs["result"]`: The value returned by the main method. This allows the post-hook to inspect and even alter the result before it is returned to the original caller.
- - **Return**:
- - Can return a modified result, which will replace the original result of the main method, or simply return `kwargs["result"]` to return the original method result.
- - ```python
- def pre_processing_hook(*args, **kwargs):
- print("Pre-processing logic here")
- # Modify arguments or perform any checks
- return args, kwargs # Return modified or unmodified args and kwargs
-
- def post_processing_hook(*args, **kwargs):
- print("Post-processing logic here")
- # Modify the result or perform any final logging or cleanup
- return kwargs["result"] # Return the modified or unmodified result
-
- class Preload:
- def __init__(self, connapp):
- # Registering a pre-hook
- connapp.ai.some_method.register_pre_hook(pre_processing_hook)
-
- # Registering a post-hook
- connapp.node.another_method.register_post_hook(post_processing_hook)
- ```
-
-
-### Executable Block
-- The plugin script can include an executable block:
- - `if __name__ == "__main__":`
- - This block allows the plugin to be run as a standalone script for testing or independent use.
+You can customize the behavior of core classes using hooks:
+- **`modify(method)`**: Alter class instances (e.g., `connapp.config`, `connapp.ai`).
+- **`register_pre_hook(method)`**: Logic to run before a method execution.
+- **`register_post_hook(method)`**: Logic to run after a method execution.
### Command Completion Support
+Plugins can provide intelligent tab completion:
+1. **Tree-based Completion (Recommended)**: Define `_connpy_tree(info)` returning a navigation dictionary.
+2. **Legacy Completion**: Define `_connpy_completion(wordsnumber, words, info)`.
-Plugins can provide intelligent **tab completion** by defining autocompletion logic. There are two supported methods, with the tree-based approach being the most modern and recommended.
+---
-#### 1. Tree-based Completion (Recommended)
+## ⚙️ gRPC Service Architecture
+Connpy can operate in a decoupled mode:
+1. **Start the API (Server)**: `conn api -s 50051`
+2. **Configure the Client**:
+ ```bash
+ conn config --service-mode remote
+ conn config --remote-host localhost:50051
+ ```
+All inventory management and execution will now happen on the server.
-Define a function called `_connpy_tree` that returns a declarative navigation tree. This method is highly efficient, supports complex state loops, and is very simple to implement for most use cases.
+---
+## 🐍 Automation Module (API)
+You can use `connpy` as a Python library for your own scripts.
+
+### Basic Execution
```python
-def _connpy_tree(info=None):
- nodes = info.get("nodes", [])
- return {
- "__exclude_used__": True, # Filter out words already typed
- "__extra__": nodes, # Suggest nodes at this level
- "--format": ["json", "yaml", "table"], # Fixed suggestions
- "*": { # Wildcard matches any positional word
- "interface1": None,
- "interface2": None,
- "--verbose": None
- }
- }
-```
-
-- **Keys**: Literal completions (exact matches).
-- **`*` Key**: A wildcard that matches any positional word typed by the user.
-- **`__extra__`**: A list or a callable `(words) -> list` that adds dynamic suggestions.
-- **`__exclude_used__`**: (Boolean) If True, automatically filters out words already present in the command line.
-
-#### 2. Legacy Function-based Completion
-
-For backward compatibility or highly custom logic, you can define `_connpy_completion`.
-
-```python
-def _connpy_completion(wordsnumber, words, info=None):
- if wordsnumber == 3:
- return ["--help", "--verbose", "start", "stop"]
-
- elif wordsnumber == 4 and words[2] == "start":
- return info["nodes"] # Suggest node names
-
- return []
-```
-
-| Parameter | Description |
-|----------------|-------------|
-| `wordsnumber` | Integer indicating the total number of words on the command line. For plugins, this typically starts at 3. |
-| `words` | A list of tokens (words) already typed. `words[0]` is always the name of the plugin. |
-| `info` | A dictionary of structured context data (`nodes`, `folders`, `profiles`, `config`). |
-
-> In this example, if the user types `connpy myplugin start ` and presses Tab, it will suggest node names.
-
-### Handling Unknown Arguments
-
-Plugins can choose to accept and process unknown arguments that are **not explicitly defined** in the parser. To enable this behavior, the plugin must define the following hidden argument in its `Parser` class:
-
-```
-self.parser.add_argument(
- "--unknown-args",
- action="store_true",
- default=True,
- help=argparse.SUPPRESS
-)
-```
-
-#### Behavior:
-
-- When this argument is present, Connpy will parse the known arguments and capture any extra (unknown) ones.
-- These unknown arguments will be passed to the plugin as `args.unknown_args` inside the `Entrypoint`.
-- If the user does not pass any unknown arguments, `args.unknown_args` will contain the default value (`True`, unless overridden).
-
-#### Example:
-
-If a plugin accepts unknown tcpdump flags like this:
-
-```
-connpy myplugin -nn -s0
-```
-
-And defines the hidden `--unknown-args` flag as shown above, then:
-
-- `args.unknown_args` inside `Entrypoint.__init__()` will be: `['-nn', '-s0']`
-
-> This allows the plugin to receive and process arguments intended for external tools (e.g., `tcpdump`) without argparse raising an error.
-
-#### Note:
-
-If a plugin does **not** define `--unknown-args`, any extra arguments passed will cause argparse to fail with an unrecognized arguments error.
-
-### Script Verification
-- The `verify_script` method in `plugins.py` is used to check the plugin script's compliance with these standards.
-- Non-compliant scripts will be rejected to ensure consistency and proper functionality within the plugin system.
-
-### Example Script
-
-For a practical example of how to write a compatible plugin script, please refer to the following example:
-
-[Example Plugin Script](https://github.com/fluzzi/awspy)
-
-This script demonstrates the required structure and implementation details according to the plugin system's standards.
-
-## Automation module usage
-### Standalone module
-```
import connpy
-router = connpy.node("uniqueName","ip/host", user="username", password="password")
-router.run(["term len 0","show run"])
+router = connpy.node("uniqueName", "1.1.1.1", user="admin")
+router.run(["show ip int brief"])
print(router.output)
-hasip = router.test("show ip int brief","1.1.1.1")
-if hasip:
- print("Router has ip 1.1.1.1")
-else:
- print("router does not have ip 1.1.1.1")
```
-### Using manager configuration
-```
-import connpy
-conf = connpy.configfile()
-device = conf.getitem("router@office")
-router = connpy.node("unique name", **device, config=conf)
-result = router.run("show ip int brief")
-print(result)
-```
-### Running parallel tasks on multiple devices
-```
-import connpy
-conf = connpy.configfile()
-#You can get the nodes from the config from a folder and fitlering in it
-nodes = conf.getitem("@office", ["router1", "router2", "router3"])
-#You can also get each node individually:
-nodes = {}
-nodes["router1"] = conf.getitem("router1@office")
-nodes["router2"] = conf.getitem("router2@office")
-nodes["router10"] = conf.getitem("router10@datacenter")
-#Also, you can create the nodes manually:
-nodes = {}
-nodes["router1"] = {"host": "1.1.1.1", "user": "user", "password": "password1"}
-nodes["router2"] = {"host": "1.1.1.2", "user": "user", "password": "password2"}
-nodes["router3"] = {"host": "1.1.1.2", "user": "user", "password": "password3"}
-#Finally you run some tasks on the nodes
-mynodes = connpy.nodes(nodes, config = conf)
-result = mynodes.test(["show ip int br"], "1.1.1.2")
-for i in result:
- print("---" + i + "---")
- print(result[i])
- print()
-# Or for one specific node
-mynodes.router1.run(["term len 0". "show run"], folder = "/home/user/logs")
-```
-### Using variables
-```
+### Parallel Tasks with Variables
+```python
import connpy
config = connpy.configfile()
-nodes = config.getitem("@office", ["router1", "router2", "router3"])
-commands = []
-commands.append("config t")
-commands.append("interface lo {id}")
-commands.append("ip add {ip} {mask}")
-commands.append("end")
-variables = {}
-variables["router1@office"] = {"ip": "10.57.57.1"}
-variables["router2@office"] = {"ip": "10.57.57.2"}
-variables["router3@office"] = {"ip": "10.57.57.3"}
-variables["__global__"] = {"id": "57"}
-variables["__global__"]["mask"] = "255.255.255.255"
-expected = "!"
-routers = connpy.nodes(nodes, config = config)
-routers.run(commands, variables)
-routers.test("ping {ip}", expected, variables)
-for key in routers.result:
- print(key, ' ---> ', ("pass" if routers.result[key] else "fail"))
+nodes = config.getitem("@office", ["router1", "router2"])
+routers = connpy.nodes(nodes, config=config)
+
+variables = {
+ "router1@office": {"id": "1"},
+ "__global__": {"mask": "255.255.255.0"}
+}
+routers.run(["interface lo{id}", "ip address 10.0.0.{id} {mask}"], variables)
```
-### Using AI
-The AI module uses a multi-agent architecture with an **Engineer** (fast execution) and an **Architect** (strategic reasoning). It supports any LLM provider through [litellm](https://github.com/BerriAI/litellm).
+
+### AI Programmatic Use
```python
import connpy
-conf = connpy.configfile()
-# Uses models and API keys from config, or override them:
-myai = connpy.ai(conf, engineer_model="gemini/gemini-2.5-flash", engineer_api_key="your-key")
-result = myai.ask("go to router1 and show me the running configuration")
-print(result["response"])
-# Streaming is enabled by default for CLI, disable for programmatic use:
-result = myai.ask("show interfaces on all routers", stream=False)
-print(result["response"])
+myai = connpy.ai(connpy.configfile())
+response = myai.ask("What is the status of the BGP neighbors in the office?")
```
-#### AI Plugin Tool Registration
-Plugins can extend the AI system by registering custom tools via the `Preload` class:
-```python
-def _register_my_tools(ai_instance):
- tool_def = {
- "type": "function",
- "function": {
- "name": "my_custom_tool",
- "description": "Does something useful.",
- "parameters": {
- "type": "object",
- "properties": {"query": {"type": "string"}},
- "required": ["query"]
- }
- }
- }
- ai_instance.register_ai_tool(
- tool_definition=tool_def,
- handler=my_handler_function,
- target="engineer", # or "architect" or "both"
- engineer_prompt="- My tool: does X.",
- architect_prompt=" * My tool (my_custom_tool)."
- )
-
-class Preload:
- def __init__(self, connapp):
- connapp.ai.modify(_register_my_tools)
-```
-## gRPC Service Architecture
-Connpy features a completely decoupled gRPC Client/Server architecture. You can run Connpy as a standalone background service and connect to it remotely via the CLI or other clients.
-
-### 1. Start the Server
-Start the gRPC service by running:
-```bash
-connpy api -s 50051
-```
-The server will handle all configurations, connections, AI sessions, and plugin execution locally on the machine it runs on.
-
-### 2. Connect the Client
-Configure your local CLI client to connect to the remote server:
-```bash
-connpy config --service-mode remote
-connpy config --remote-host localhost:50051
-```
-Once configured, all commands (`connpy node`, `connpy list`, `connpy ai`, etc.) will execute transparently on the remote server via thin-client proxies. You can revert back to standalone execution at any time by running `connpy config --service-mode local`.
-
-### Programmatic Access (gRPC & SOA)
-If you wish to build your own application (Web, Desktop, or Scripts) using the Connpy backend, you can use the `ServiceProvider` to interact with either a local or remote service transparently.
-
-```python
-import connpy
-from connpy.services.provider import ServiceProvider
-
-# Initialize local config
-config = connpy.configfile()
-
-# Connect to the remote gRPC service
-services = ServiceProvider(
- config,
- mode="remote",
- remote_host="localhost:50051"
-)
-
-# Use any service (the logic is identical to local mode)
-nodes = services.nodes.list_nodes()
-for name in nodes:
- print(f"Found node: {name}")
-
-# Run a command remotely via streaming
-for chunk in services.execution.run_commands(nodes=["server1"], commands=["uptime"]):
- print(chunk["output"], end="")
-```
-
-
+---
+*For detailed developer notes and plugin hooks documentation, see the [Documentation](https://fluzzi.github.io/connpy/).*
diff --git a/connpy/__init__.py b/connpy/__init__.py
index 50c00bc..6b200dd 100644
--- a/connpy/__init__.py
+++ b/connpy/__init__.py
@@ -1,476 +1,182 @@
#!/usr/bin/env python3
'''
-## Connection manager
+
+
+
-Connpy is a SSH, SFTP, Telnet, kubectl, Docker pod, and AWS SSM connection manager and automation module for Linux, Mac, and Docker.
-### Features
- - Manage connections using SSH, SFTP, Telnet, kubectl, Docker exec, and AWS SSM.
- - Set contexts to manage specific nodes from specific contexts (work/home/clients/etc).
- - You can generate profiles and reference them from nodes using @profilename so you don't
- need to edit multiple nodes when changing passwords or other information.
- - Nodes can be stored on @folder or @subfolder@folder to organize your devices. They can
- be referenced using node@subfolder@folder or node@folder.
- - If you have too many nodes, get a completion script using: conn config --completion.
- Or use fzf by installing pyfzf and running conn config --fzf true.
- - Create in bulk, copy, move, export, and import nodes for easy management.
- - Run automation scripts on network devices.
- - Use AI with a multi-agent system (Engineer/Architect) to help you manage your devices.
- Supports any LLM provider via litellm (OpenAI, Anthropic, Google, etc.).
- - Add plugins with your own scripts, and execute them remotely.
- - Fully decoupled gRPC Client/Server architecture.
- - Unified UI with syntax highlighting and theming.
- - Much more!
+# Connpy
+[](https://pypi.org/pypi/connpy/)
+[](https://pypi.org/pypi/connpy/)
+[](https://github.com/fluzzi/connpy/blob/main/LICENSE)
+[](https://pypi.org/pypi/connpy/)
-### Usage
+**Connpy** is a powerful Connection Manager and Network Automation Platform for Linux, Mac, and Docker. It provides a unified interface for **SSH, SFTP, Telnet, kubectl, Docker pods, and AWS SSM**.
+
+The v6 release introduces the **AI Copilot**, an interactive terminal assistant that understands your network context and helps you manage your infrastructure more intelligently.
+
+
+## π€ AI Copilot (New in v6)
+The AI Copilot is deeply integrated into your terminal workflow:
+- **Terminal Context Awareness**: The Copilot can "see" your screen output, helping you diagnose errors or analyze command results in real-time.
+- **Hybrid Multi-Agent System**: Automatically escalates complex tasks between the **Network Engineer** (execution) and the **Network Architect** (strategy).
+- **MCP Integration**: Dynamically load tools from external providers (6WIND, AWS, etc.) via the Model Context Protocol.
+- **Interactive Chat**: Launch with `conn ai` for a collaborative troubleshooting session.
+
+
+## Core Features
+- **Multi-Protocol**: Native support for SSH, SFTP, Telnet, kubectl, Docker exec, and AWS SSM.
+- **Context Management**: Set regex-based contexts to manage specific nodes across different environments (work, home, clients).
+- **Advanced Inventory**:
+ - Organize nodes in folders (`@folder`) and subfolders (`@subfolder@folder`).
+ - Use Global Profiles (`@profilename`) to manage shared credentials easily.
+ - Bulk creation, copying, moving, and export/import of nodes.
+- **Modern UI**: High-performance terminal experience with `prompt-toolkit`, including:
+ - Fuzzy search integration with `fzf`.
+ - Advanced tab completion.
+ - Syntax highlighting and customizable themes.
+- **Automation Engine**: Run parallel tasks and playbooks on multiple devices with variable support.
+- **Plugin System**: Build and execute custom Python scripts locally or on a remote gRPC server.
+- **gRPC Architecture**: Fully decoupled Client/Server model for distributed management.
+- **Privacy & Sync**: Local-first encrypted storage (RSA/OAEP) with optional Google Drive backup.
+
+
+## Installation
+
+```bash
+pip install connpy
```
+
+### Run it in Windows/Linux using Docker
+```bash
+git clone https://github.com/fluzzi/connpy
+cd connpy
+docker compose build
+
+# Run it like a native app (completely silent)
+docker compose --log-level ERROR run --rm --remove-orphans connpy-app [command]
+
+# Pro Tip: Add this alias for a 100% native experience from any folder
+alias conn='docker compose -f /path/to/connpy/docker-compose.yml --log-level ERROR run --rm --remove-orphans connpy-app'
+```
+
+---
+
+## π Privacy & Integration
+
+### Privacy Policy
+Connpy is committed to protecting your privacy:
+- **Local Storage**: All server addresses, usernames, and passwords are encrypted and stored **only** on your machine. No data is transmitted to our servers.
+- **Data Access**: Data is used solely for managing and automating your connections.
+
+### Google Integration
+Used strictly for backup:
+- **Backup**: Sync your encrypted configuration with your Google Drive account.
+- **Scoped Access**: Connpy only accesses its own backup files.
+
+---
+
+## Usage
+
+```bash
usage: conn [-h] [--add | --del | --mod | --show | --debug] [node|folder] [--sftp]
- conn {profile,move,mv,copy,cp,list,ls,bulk,export,import,ai,run,api,plugin,config,sync,context} ...
-
-positional arguments:
- node|folder node[@subfolder][@folder]
- Connect to specific node or show all matching nodes
- [@subfolder][@folder]
- Show all available connections globally or in specified path
-
-options:
- -h, --help show this help message and exit
- -v, --version Show version
- -a, --add Add new node[@subfolder][@folder] or [@subfolder]@folder
- -r, --del, --rm Delete node[@subfolder][@folder] or [@subfolder]@folder
- -e, --mod, --edit Modify node[@subfolder][@folder]
- -s, --show Show node[@subfolder][@folder]
- -d, --debug Display all conections steps
- -t, --sftp Connects using sftp instead of ssh
- --service-mode Set the backend service mode (local or remote)
- --remote Connect to a remote connpy service via gRPC
- --theme UI Output theme (dark, light, or path)
-
-Commands:
- profile Manage profiles
- move(mv) Move node
- copy(cp) Copy node
- list(ls) List profiles, nodes or folders
- bulk Add nodes in bulk
- export Export connection folder to Yaml file
- import Import connection folder to config from Yaml file
- ai Make request to an AI
- run Run scripts or commands on nodes
- api Start and stop connpy api
- plugin Manage plugins
- config Manage app config
- sync Sync config with Google
- context Manage contexts with regex matching
+ conn {profile,move,copy,list,bulk,export,import,ai,run,api,plugin,config,sync,context} ...
```
-### Manage profiles
-```
-usage: conn profile [-h] (--add | --del | --mod | --show) profile
+### Basic Examples:
+```bash
+# Add a folder and subfolder
+conn --add @office
+conn --add @datacenter@office
-positional arguments:
- profile Name of profile to manage
+# Add a node with a profile
+conn --add server1@datacenter@office --profile @myuser
-options:
- -h, --help show this help message and exit
- -a, --add Add new profile
- -r, --del, --rm Delete profile
- -e, --mod, --edit Modify profile
- -s, --show Show profile
+# Connect to a node (fuzzy match)
+conn server1
+# Start the AI Copilot
+conn ai
+
+# Run a command on all nodes in a folder
+conn run @office "uptime"
```
-### Examples
-```
- #Add new profile
- conn profile --add office-user
- #Add new folder
- conn --add @office
- #Add new subfolder
- conn --add @datacenter@office
- #Add node to subfolder
- conn --add server@datacenter@office
- #Add node to folder
- conn --add pc@office
- #Show node information
- conn --show server@datacenter@office
- #Connect to nodes
- conn pc@office
- conn server
- #Create and set new context
- conn context -a office .*@office
- conn context --set office
- #Run a command in a node
- conn run server ls -la
-```
+---
+
## Plugin Requirements for Connpy
### Remote Plugin Execution
When Connpy operates in remote mode, plugins are executed **transparently on the server**:
- The client automatically downloads the plugin source code (`Parser` class context) to generate the local `argparse` structure and provide autocompletion.
-- The execution phase (`Entrypoint` class) is redirected via gRPC streams to execute in the server's memory, ensuring the plugin runs securely against the server's inventory without passing sensitive data to the client.
-- You can manage remote plugins using the `--remote` flag (e.g. `connpy plugin --add myplugin script.py --remote`).
+- The execution phase (`Entrypoint` class) is redirected via gRPC streams to execute in the server's memory.
+- You can manage remote plugins using the `--remote` flag.
### General Structure
-- The plugin script must be a Python file.
-- Only the following top-level elements are allowed in the plugin script:
- - Class definitions
- - Function definitions
- - Import statements
- - The `if __name__ == "__main__":` block for standalone execution
- - Pass statements
-
-### Specific Class Requirements
-- The plugin script must define specific classes with particular attributes and methods. Each class serves a distinct role within the plugin's architecture:
- 1. **Class `Parser`**:
- - **Purpose**: Handles parsing of command-line arguments.
- - **Requirements**:
- - Must contain only one method: `__init__`.
- - The `__init__` method must initialize at least one attribute:
- - `self.parser`: An instance of `argparse.ArgumentParser`.
- 2. **Class `Entrypoint`**:
- - **Purpose**: Acts as the entry point for plugin execution, utilizing parsed arguments and integrating with the main application.
- - **Requirements**:
- - Must have an `__init__` method that accepts exactly three parameters besides `self`:
- - `args`: Arguments passed to the plugin.
- - The parser instance (typically `self.parser` from the `Parser` class).
- - The Connapp instance to interact with the Connpy app.
- 3. **Class `Preload`**:
- - **Purpose**: Performs any necessary preliminary setup or configuration independent of the main parsing and entry logic.
- - **Requirements**:
- - Contains at least an `__init__` method that accepts parameter connapp besides `self`.
-
-### Class Dependencies and Combinations
-- **Dependencies**:
- - `Parser` and `Entrypoint` are interdependent and must both be present if one is included.
- - `Preload` is independent and may exist alone or alongside the other classes.
-- **Valid Combinations**:
- - `Parser` and `Entrypoint` together.
- - `Preload` alone.
- - All three classes (`Parser`, `Entrypoint`, `Preload`).
+- The plugin script must define specific classes:
+ 1. **Class `Parser`**: Handles `argparse.ArgumentParser` initialization.
+ 2. **Class `Entrypoint`**: Main execution logic (receives `args`, `parser`, and `connapp`).
+ 3. **Class `Preload`**: (Optional) For modifying core app behavior or registering hooks.
### Preload Modifications and Hooks
-
-In the `Preload` class of the plugin system, you have the ability to customize the behavior of existing classes and methods within the application through a robust hooking system. This documentation explains how to use the `modify`, `register_pre_hook`, and `register_post_hook` methods to tailor plugin functionality to your needs.
-
-#### Modifying Classes with `modify`
-The `modify` method allows you to alter instances of a class at the time they are created or after their creation. This is particularly useful for setting or modifying configuration settings, altering default behaviors, or adding new functionalities to existing classes without changing the original class definitions.
-
-- **Usage**: Modify a class to include additional configurations or changes
-- **Modify Method Signature**:
- - `modify(modification_method)`: A function that is invoked with an instance of the class as its argument. This function should perform any modifications directly on this instance.
-- **Modification Method Signature**:
- - **Arguments**:
- - `cls`: This function accepts a single argument, the class instance, which it then modifies.
- - **Modifiable Classes**:
- - `connapp.config`
- - `connapp.node`
- - `connapp.nodes`
- - `connapp.ai`
- - ```python
- def modify_config(cls):
- # Example modification: adding a new attribute or modifying an existing one
- cls.new_attribute = 'New Value'
-
- class Preload:
- def __init__(self, connapp):
- # Applying modification to the config class instance
- connapp.config.modify(modify_config)
- ```
-
-#### Implementing Method Hooks
-There are 2 methods that allows you to define custom logic to be executed before (`register_pre_hook`) or after (`register_post_hook`) the main logic of a method. This is particularly useful for logging, auditing, preprocessing inputs, postprocessing outputs or adding functionalities.
-
- - **Usage**: Register hooks to methods to execute additional logic before or after the main method execution.
-- **Registration Methods Signature**:
- - `register_pre_hook(pre_hook_method)`: A function that is invoked before the main method is executed. This function should do preprocessing of the arguments.
- - `register_post_hook(post_hook_method)`: A function that is invoked after the main method is executed. This function should do postprocessing of the outputs.
-- **Method Signatures for Pre-Hooks**
- - `pre_hook_method(*args, **kwargs)`
- - **Arguments**:
- - `*args`, `**kwargs`: The arguments and keyword arguments that will be passed to the method being hooked. The pre-hook function has the opportunity to inspect and modify these arguments before they are passed to the main method.
- - **Return**:
- - Must return a tuple `(args, kwargs)`, which will be used as the new arguments for the main method. If the original arguments are not modified, the function should return them as received.
-- **Method Signatures for Post-Hooks**:
- - `post_hook_method(*args, **kwargs)`
- - **Arguments**:
- - `*args`, `**kwargs`: The arguments and keyword arguments that were passed to the main method.
- - `kwargs["result"]`: The value returned by the main method. This allows the post-hook to inspect and even alter the result before it is returned to the original caller.
- - **Return**:
- - Can return a modified result, which will replace the original result of the main method, or simply return `kwargs["result"]` to return the original method result.
- - ```python
- def pre_processing_hook(*args, **kwargs):
- print("Pre-processing logic here")
- # Modify arguments or perform any checks
- return args, kwargs # Return modified or unmodified args and kwargs
-
- def post_processing_hook(*args, **kwargs):
- print("Post-processing logic here")
- # Modify the result or perform any final logging or cleanup
- return kwargs["result"] # Return the modified or unmodified result
-
- class Preload:
- def __init__(self, connapp):
- # Registering a pre-hook
- connapp.ai.some_method.register_pre_hook(pre_processing_hook)
-
- # Registering a post-hook
- connapp.node.another_method.register_post_hook(post_processing_hook)
- ```
-
-### Executable Block
-- The plugin script can include an executable block:
- - `if __name__ == "__main__":`
- - This block allows the plugin to be run as a standalone script for testing or independent use.
+You can customize the behavior of core classes using hooks:
+- **`modify(method)`**: Alter class instances (e.g., `connapp.config`, `connapp.ai`).
+- **`register_pre_hook(method)`**: Logic to run before a method execution.
+- **`register_post_hook(method)`**: Logic to run after a method execution.
### Command Completion Support
+Plugins can provide intelligent tab completion:
+1. **Tree-based Completion (Recommended)**: Define `_connpy_tree(info)` returning a navigation dictionary.
+2. **Legacy Completion**: Define `_connpy_completion(wordsnumber, words, info)`.
-Plugins can provide intelligent **tab completion** by defining autocompletion logic. There are two supported methods, with the tree-based approach being the most modern and recommended.
+---
-#### 1. Tree-based Completion (Recommended)
+## βοΈ gRPC Service Architecture
+Connpy can operate in a decoupled mode:
+1. **Start the API (Server)**: `conn api -s 50051`
+2. **Configure the Client**:
+ ```bash
+ conn config --service-mode remote
+ conn config --remote-host localhost:50051
+ ```
+All inventory management and execution will now happen on the server.
-Define a function called `_connpy_tree` that returns a declarative navigation tree. This method is highly efficient, supports complex state loops, and is very simple to implement for most use cases.
+---
+## π Automation Module (API)
+You can use `connpy` as a Python library for your own scripts.
+
+### Basic Execution
```python
-def _connpy_tree(info=None):
- nodes = info.get("nodes", [])
- return {
- "__exclude_used__": True, # Filter out words already typed
- "__extra__": nodes, # Suggest nodes at this level
- "--format": ["json", "yaml", "table"], # Fixed suggestions
- "*": { # Wildcard matches any positional word
- "interface1": None,
- "interface2": None,
- "--verbose": None
- }
- }
-```
-
-- **Keys**: Literal completions (exact matches).
-- **`*` Key**: A wildcard that matches any positional word typed by the user.
-- **`__extra__`**: A list or a callable `(words) -> list` that adds dynamic suggestions.
-- **`__exclude_used__`**: (Boolean) If True, automatically filters out words already present in the command line.
-
-#### 2. Legacy Function-based Completion
-
-For backward compatibility or highly custom logic, you can define `_connpy_completion`.
-
-```python
-def _connpy_completion(wordsnumber, words, info=None):
- if wordsnumber == 3:
- return ["--help", "--verbose", "start", "stop"]
-
- elif wordsnumber == 4 and words[2] == "start":
- return info["nodes"] # Suggest node names
-
- return []
-```
-
-| Parameter | Description |
-|----------------|-------------|
-| `wordsnumber` | Integer indicating the total number of words on the command line. For plugins, this typically starts at 3. |
-| `words` | A list of tokens (words) already typed. `words[0]` is always the name of the plugin. |
-| `info` | A dictionary of structured context data (`nodes`, `folders`, `profiles`, `config`). |
-
-> In this example, if the user types `connpy myplugin start ` and presses Tab, it will suggest node names.
-
-### Handling Unknown Arguments
-
-Plugins can choose to accept and process unknown arguments that are **not explicitly defined** in the parser. To enable this behavior, the plugin must define the following hidden argument in its `Parser` class:
-
-```
-self.parser.add_argument(
- "--unknown-args",
- action="store_true",
- default=True,
- help=argparse.SUPPRESS
-)
-```
-
-#### Behavior:
-
-- When this argument is present, Connpy will parse the known arguments and capture any extra (unknown) ones.
-- These unknown arguments will be passed to the plugin as `args.unknown_args` inside the `Entrypoint`.
-- If the user does not pass any unknown arguments, `args.unknown_args` will contain the default value (`True`, unless overridden).
-
-#### Example:
-
-If a plugin accepts unknown tcpdump flags like this:
-
-```
-connpy myplugin -nn -s0
-```
-
-And defines the hidden `--unknown-args` flag as shown above, then:
-
-- `args.unknown_args` inside `Entrypoint.__init__()` will be: `['-nn', '-s0']`
-
-> This allows the plugin to receive and process arguments intended for external tools (e.g., `tcpdump`) without argparse raising an error.
-
-#### Note:
-
-If a plugin does **not** define `--unknown-args`, any extra arguments passed will cause argparse to fail with an unrecognized arguments error.
-
-### Script Verification
-- The `verify_script` method in `plugins.py` is used to check the plugin script's compliance with these standards.
-- Non-compliant scripts will be rejected to ensure consistency and proper functionality within the plugin system.
--
-### Example Script
-
-For a practical example of how to write a compatible plugin script, please refer to the following example:
-
-[Example Plugin Script](https://github.com/fluzzi/awspy)
-
-This script demonstrates the required structure and implementation details according to the plugin system's standards.
-
-## gRPC Service Architecture
-Connpy features a completely decoupled gRPC Client/Server architecture. You can run Connpy as a standalone background service and connect to it remotely via the CLI or other clients.
-
-### 1. Start the Server
-Start the gRPC service by running:
-```bash
-connpy api -s 50051
-```
-The server will handle all configurations, connections, AI sessions, and plugin execution locally on the machine it runs on.
-
-### 2. Connect the Client
-Configure your local CLI client to connect to the remote server:
-```bash
-connpy config --service-mode remote
-connpy config --remote-host localhost:50051
-```
-Once configured, all commands (`connpy node`, `connpy list`, `connpy ai`, etc.) will execute transparently on the remote server via thin-client proxies. You can revert back to standalone execution at any time by running `connpy config --service-mode local`.
-
-### Programmatic Access (gRPC & SOA)
-Developers can build their own applications using the Connpy backend by utilizing the `ServiceProvider`:
-
-```python
-from connpy.services.provider import ServiceProvider
-services = ServiceProvider(config, mode="remote", remote_host="localhost:50051")
-nodes = services.nodes.list_nodes()
-```
-
-
-## Automation module
-The automation module
-### Standalone module
-```
import connpy
-router = connpy.node("uniqueName","ip/host", user="user", password="pass")
-router.run(["term len 0","show run"])
+router = connpy.node("uniqueName", "1.1.1.1", user="admin")
+router.run(["show ip int brief"])
print(router.output)
-hasip = router.test("show ip int brief","1.1.1.1")
-if hasip:
- print("Router has ip 1.1.1.1")
-else:
- print("router does not have ip 1.1.1.1")
```
-### Using manager configuration
-```
-import connpy
-conf = connpy.configfile()
-device = conf.getitem("server@office")
-server = connpy.node("unique name", **device, config=conf)
-result = server.run(["cd /", "ls -la"])
-print(result)
-```
-### Running parallel tasks
-```
-import connpy
-conf = connpy.configfile()
-#You can get the nodes from the config from a folder and fitlering in it
-nodes = conf.getitem("@office", ["router1", "router2", "router3"])
-#You can also get each node individually:
-nodes = {}
-nodes["router1"] = conf.getitem("router1@office")
-nodes["router2"] = conf.getitem("router2@office")
-nodes["router10"] = conf.getitem("router10@datacenter")
-#Also, you can create the nodes manually:
-nodes = {}
-nodes["router1"] = {"host": "1.1.1.1", "user": "user", "password": "pass1"}
-nodes["router2"] = {"host": "1.1.1.2", "user": "user", "password": "pass2"}
-nodes["router3"] = {"host": "1.1.1.2", "user": "user", "password": "pass3"}
-#Finally you run some tasks on the nodes
-mynodes = connpy.nodes(nodes, config = conf)
-result = mynodes.test(["show ip int br"], "1.1.1.2")
-for i in result:
- print("---" + i + "---")
- print(result[i])
- print()
-# Or for one specific node
-mynodes.router1.run(["term len 0". "show run"], folder = "/home/user/logs")
-```
-### Using variables
-```
+### Parallel Tasks with Variables
+```python
import connpy
config = connpy.configfile()
-nodes = config.getitem("@office", ["router1", "router2", "router3"])
-commands = []
-commands.append("config t")
-commands.append("interface lo {id}")
-commands.append("ip add {ip} {mask}")
-commands.append("end")
-variables = {}
-variables["router1@office"] = {"ip": "10.57.57.1"}
-variables["router2@office"] = {"ip": "10.57.57.2"}
-variables["router3@office"] = {"ip": "10.57.57.3"}
-variables["__global__"] = {"id": "57"}
-variables["__global__"]["mask"] = "255.255.255.255"
-expected = "!"
-routers = connpy.nodes(nodes, config = config)
-routers.run(commands, variables)
-routers.test("ping {ip}", expected, variables)
-for key in routers.result:
- print(key, ' ---> ', ("pass" if routers.result[key] else "fail"))
-```
-### Using AI
+nodes = config.getitem("@office", ["router1", "router2"])
+routers = connpy.nodes(nodes, config=config)
+
+variables = {
+ "router1@office": {"id": "1"},
+ "__global__": {"mask": "255.255.255.0"}
+}
+routers.run(["interface lo{id}", "ip address 10.0.0.{id} {mask}"], variables)
```
+
+### AI Programmatic Use
+```python
import connpy
-conf = connpy.configfile()
-# Uses models and API keys from config, or override them:
-myai = connpy.ai(conf, engineer_model="gemini/gemini-2.5-flash", engineer_api_key="your-key")
-result = myai.ask("go to router1 and show me the running configuration")
-print(result["response"])
-# Streaming is enabled by default for CLI, disable for programmatic use:
-result = myai.ask("show interfaces on all routers", stream=False)
-print(result["response"])
+myai = connpy.ai(connpy.configfile())
+response = myai.ask("What is the status of the BGP neighbors in the office?")
```
-#### AI Plugin Tool Registration
-Plugins can register custom tools with the AI system using `register_ai_tool()` in their `Preload` class:
-```
-def _register_my_tools(ai_instance):
- tool_def = {
- "type": "function",
- "function": {
- "name": "my_custom_tool",
- "description": "Does something useful.",
- "parameters": {
- "type": "object",
- "properties": {"query": {"type": "string"}},
- "required": ["query"]
- }
- }
- }
- ai_instance.register_ai_tool(
- tool_definition=tool_def,
- handler=my_handler_function,
- target="engineer", # or "architect" or "both"
- engineer_prompt="- My tool: does X.",
- architect_prompt=" * My tool (my_custom_tool)."
- )
-
-class Preload:
- def __init__(self, connapp):
- connapp.ai.modify(_register_my_tools)
-```
-
-## Developer Notes (SOA Architecture)
-As of version 2.0, Connpy has migrated to a **Service-Oriented Architecture (SOA)**:
-- **`connpy/cli/`**: Contains all CLI handlers. These are responsible for argument parsing, user interaction (via `inquirer`), and visual output (via `printer`).
-- **`connpy/services/`**: Contains pure logic services (Node, Profile, Execution, etc.).
-- **Zero-Print Policy**: Services must never use `print()`. All output must be returned as data structures or generators to the caller (CLI handlers).
-- **ServiceProvider**: Access services via `connapp.services`. This allows transparent switching between local and remote (gRPC) backends without modifying CLI logic.
+---
+*For detailed developer notes and plugin hooks documentation, see the [Documentation](https://fluzzi.github.io/connpy/).*
'''
from .core import node,nodes
from .configfile import configfile
diff --git a/connpy/_version.py b/connpy/_version.py
index 2718f39..a33395c 100644
--- a/connpy/_version.py
+++ b/connpy/_version.py
@@ -1 +1 @@
-__version__ = "6.0.0b7"
+__version__ = "6.0.0b8"
diff --git a/connpy/ai.py b/connpy/ai.py
index 09d100d..e987b21 100755
--- a/connpy/ai.py
+++ b/connpy/ai.py
@@ -118,7 +118,7 @@ class ai:
aiconfig = self.config.config.get("ai", {})
# Modelos (Prioridad: Argumento -> Config -> Default)
- self.engineer_model = engineer_model or aiconfig.get("engineer_model") or "gemini/gemini-3.1-flash-lite-preview"
+ self.engineer_model = engineer_model or aiconfig.get("engineer_model") or "gemini/gemini-3.1-flash-lite"
self.architect_model = architect_model or aiconfig.get("architect_model") or "anthropic/claude-sonnet-4-6"
# API Keys (Prioridad: Argumento -> Config)
@@ -1303,6 +1303,8 @@ class ai:
node_info = node_info or {}
os_info = node_info.get("os", "unknown")
node_name = node_info.get("name", "unknown")
+ persona = node_info.get("persona", "engineer")
+ memories = node_info.get("memories", [])
vendor_reference = ""
if os_info and os_info != "unknown":
@@ -1315,7 +1317,31 @@ class ai:
except Exception:
pass
- system_prompt = f"""Role: TERMINAL COPILOT. You assist a network engineer during a live SSH session.
+ if persona == "architect":
+ system_prompt = f"""Role: NETWORK ARCHITECT. You act as a senior strategic advisor during a live SSH session.
+Rules:
+1. Answer the user's question directly based on the Terminal Context.
+2. Focus on the "why" and "how". Analyze topologies, design patterns, and validate configurations.
+3. Do NOT provide commands to execute unless specifically requested. Instead, explain the consequences and best practices.
+4. Keep your guide concise and authoritative.
+5. You MUST output your response in the following strict format:
+
+Your brief tactical guide in markdown.
+
+
+
+
+low
+
+6. Risk level is usually "low" for read-only/no commands.
+
+Terminal Context:
+{terminal_buffer}
+
+Device OS: {os_info}
+Node: {node_name}"""
+ else:
+ system_prompt = f"""Role: TERMINAL COPILOT. You assist a network engineer during a live SSH session.
Rules:
1. Answer the user's question directly based on the Terminal Context.
2. If the user asks you to analyze, parse, or extract data from the Terminal Context, DO IT directly in the section (you can use markdown tables or lists). Do NOT just give them a command to do it themselves.
@@ -1343,6 +1369,11 @@ Node: {node_name}"""
if vendor_reference:
system_prompt += f"\n\nVendor Command Reference:\n{vendor_reference}"
+ if memories:
+ system_prompt += "\n\nSession Memory (Important Facts):\n"
+ for m in memories:
+ system_prompt += f"- {m}\n"
+
# Fetch MCP tools for the current OS
mcp_tools = []
try:
@@ -1362,14 +1393,18 @@ Node: {node_name}"""
iteration = 0
max_iterations = 5 # Allow up to 5 iterations for tool usage
+ # Use models based on persona
+ current_model = self.architect_model if persona == "architect" else self.engineer_model
+ current_key = self.architect_key if persona == "architect" else self.engineer_key
+
try:
while iteration < max_iterations:
iteration += 1
response = await acompletion(
- model=self.engineer_model,
+ model=current_model,
messages=messages,
tools=mcp_tools if mcp_tools else None,
- api_key=self.engineer_key,
+ api_key=current_key,
stream=True
)
diff --git a/connpy/cli/run_handler.py b/connpy/cli/run_handler.py
index 1fd7b1f..5d92489 100644
--- a/connpy/cli/run_handler.py
+++ b/connpy/cli/run_handler.py
@@ -121,7 +121,7 @@ class RunHandler:
commands=commands,
variables=variables,
parallel=options.get("parallel", 10),
- timeout=options.get("timeout", 10),
+ timeout=options.get("timeout", 20),
folder=folder,
prompt=prompt,
on_node_complete=_on_run_complete
@@ -155,7 +155,7 @@ class RunHandler:
expected=expected,
variables=variables,
parallel=options.get("parallel", 10),
- timeout=options.get("timeout", 10),
+ timeout=options.get("timeout", 20),
folder=folder,
prompt=prompt,
on_node_complete=_on_test_complete
diff --git a/connpy/cli/terminal_ui.py b/connpy/cli/terminal_ui.py
index 3b66061..e80967c 100644
--- a/connpy/cli/terminal_ui.py
+++ b/connpy/cli/terminal_ui.py
@@ -1,6 +1,7 @@
import os
import re
import sys
+import time
import asyncio
import fcntl
import termios
@@ -22,19 +23,37 @@ from connpy.utils import log_cleaner
from ..services.ai_service import AIService
class CopilotInterface:
- def __init__(self, config, history=None, pt_input=None, pt_output=None, rich_file=None):
+ def __init__(self, config, history=None, pt_input=None, pt_output=None, rich_file=None, session_state=None):
self.config = config
self.history = history or InMemoryHistory()
self.pt_input = pt_input
self.pt_output = pt_output
self.ai_service = AIService(config)
-
+ self.session_state = session_state if session_state is not None else {
+ 'persona': 'engineer',
+ 'trust_mode': False,
+ 'memories': [],
+ 'os': None,
+ 'prompt': None
+ }
+
if rich_file:
self.console = Console(theme=connpy_theme, force_terminal=True, file=rich_file)
else:
self.console = Console(theme=connpy_theme)
-
- self.mode_range, self.mode_single, self.mode_lines = 0, 1, 2
+
+ self.mode_range, self.mode_single, self.mode_lines = 0, 1, 2
+
+ def _get_theme_color(self, style_name: str, fallback: str = "white") -> str:
+ """Extract Hex or ANSI color name from the active rich theme."""
+ try:
+ style = connpy_theme.styles.get(style_name)
+ if style and style.color:
+ # If it's a standard color like 'green', Rich might return its hex triplet
+ if style.color.is_default: return fallback
+ return style.color.triplet.hex if style.color.triplet else style.color.name
+ except: pass
+ return fallback
async def run_session(self,
raw_bytes: bytes,
@@ -60,7 +79,9 @@ class CopilotInterface:
'total_lines': len(buffer.split('\n')),
'context_lines': min(50, len(buffer.split('\n'))),
'context_mode': self.mode_range,
- 'cancelled': False
+ 'cancelled': False,
+ 'toolbar_msg': '',
+ 'msg_expiry': 0
}
# 1. Visual Separation
@@ -90,8 +111,13 @@ class CopilotInterface:
event.app.invalidate()
@bindings.add('tab')
def _(event):
- state['context_mode'] = (state['context_mode'] + 1) % 3
- event.app.invalidate()
+ buf = event.current_buffer
+ # If typing a slash command (no spaces yet), use tab to autocomplete inline
+ if buf.text.startswith('/') and ' ' not in buf.text:
+ buf.complete_next()
+ else:
+ state['context_mode'] = (state['context_mode'] + 1) % 3
+ event.app.invalidate()
@bindings.add('escape', eager=True)
@bindings.add('c-c')
def _(event):
@@ -111,154 +137,302 @@ class CopilotInterface:
return preview + "\n" + log_cleaner(active_raw.decode(errors='replace'))
def get_prompt_text():
+ import html
+ # Always use user_prompt color for the Ask prompt
+ color = self._get_theme_color("user_prompt", "cyan")
+
if state['context_mode'] == self.mode_lines:
- return HTML(f"Ask [Ctx: {state['context_lines']}/{state['total_lines']}L]: ")
+ text = html.escape(f"Ask [Ctx: {state['context_lines']}/{state['total_lines']}L]: ")
+ return HTML(f'')
active = get_active_buffer()
lines_count = len(active.split('\n'))
mode_str = {self.mode_range: "Range", self.mode_single: "Cmd"}[state['context_mode']]
- return HTML(f"Ask [{mode_str} {state['context_cmd']} ~{lines_count}L]: ")
+ text = html.escape(f"Ask [{mode_str} {state['context_cmd']} ~{lines_count}L]: ")
+ return HTML(f'')
+
+ from prompt_toolkit.application.current import get_app
def get_toolbar():
+ import html
+ app = get_app()
+ c_warning = self._get_theme_color("warning", "yellow")
+
+ if app and app.current_buffer:
+ text = app.current_buffer.text
+ # Solo mostrar ayuda de comandos si estamos escribiendo el primer comando y no hay espacios
+ if text.startswith('/') and ' ' not in text:
+ commands = ['/os', '/prompt', '/architect', '/engineer', '/trust', '/untrust', '/memorize', '/clear']
+ matches = [c for c in commands if c.startswith(text.lower())]
+ if matches:
+ m_text = html.escape(f"Available: {' '.join(matches)}")
+ return HTML(f'' + " " * 20)
+
m_label = {self.mode_range: "RANGE", self.mode_single: "SINGLE", self.mode_lines: "LINES"}[state['context_mode']]
if state['context_mode'] == self.mode_lines:
- return HTML(f"\u25b6 Ctrl+\u2191/\u2193 adjusts by 50 lines [Tab: {m_label}]")
- idx = max(0, state['total_cmds'] - state['context_cmd'])
- return HTML(f"\u25b6 {blocks[idx][1]} [Tab: {m_label}]")
+ base_str = f'\u25b6 Ctrl+\u2191/\u2193 adjusts by 50 lines [Tab: {m_label}]'
+ else:
+ idx = max(0, state['total_cmds'] - state['context_cmd'])
+ desc = blocks[idx][1]
+ base_str = f'\u25b6 {desc} [Tab: {m_label}]'
+
+ # Wrap base_str in a style to maintain consistency and avoid glitches
+ # The fg color will be inherited from bottom-toolbar global style if not specified here
+ base_html = f'{html.escape(base_str)}'
+
+ res_html = base_html
+ if state.get('toolbar_msg'):
+ if time.time() < state.get('msg_expiry', 0):
+ msg = html.escape(state['toolbar_msg'])
+ res_html = f' | ' + base_html
+ else:
+ state['toolbar_msg'] = ''
+
+ # Pad with spaces to ensure the line is cleared when the message disappears
+ return HTML(res_html + " " * 20)
- # 2. Ask question
- session = PromptSession(history=self.history)
- try:
- # Usamos un try/finally interno para asegurar que si algo falla en prompt_async,
- # no nos quedemos con la terminal en un estado extraΓ±o.
- question = await session.prompt_async(
- get_prompt_text,
- key_bindings=bindings,
- bottom_toolbar=get_toolbar
+ from prompt_toolkit.completion import Completer, Completion
+ class SlashCommandCompleter(Completer):
+ def get_completions(self, document, complete_event):
+ text = document.text_before_cursor
+ if text.startswith('/'):
+ parts = text.split()
+ # Only autocomplete the first word
+ if len(parts) <= 1 or (len(parts) == 1 and not text.endswith(' ')):
+ cmd_part = parts[0] if parts else text
+ commands = [
+ ('/os', 'Set device OS (e.g. cisco_ios)'),
+ ('/prompt', 'Override prompt regex'),
+ ('/architect', 'Switch to Architect persona'),
+ ('/engineer', 'Switch to Engineer persona'),
+ ('/trust', 'Enable auto-execute'),
+ ('/untrust', 'Disable auto-execute'),
+ ('/memorize', 'Add fact to memory'),
+ ('/clear', 'Clear memory')
+ ]
+ for cmd, desc in commands:
+ if cmd.startswith(cmd_part.lower()):
+ yield Completion(cmd, start_position=-len(cmd_part), display_meta=desc)
+
+ copilot_completer = SlashCommandCompleter()
+
+ while True:
+ # 2. Ask question
+ from prompt_toolkit.styles import Style
+ c_contrast = self._get_theme_color("contrast", "gray")
+ ui_style = Style.from_dict({
+ 'bottom-toolbar': f'fg:{c_contrast}',
+ })
+
+ session = PromptSession(
+ history=self.history,
+ input=self.pt_input,
+ output=self.pt_output,
+ completer=copilot_completer,
+ reserve_space_for_menu=0,
+ style=ui_style
)
- except (KeyboardInterrupt, EOFError):
- state['cancelled'] = True
- question = ""
-
- if state['cancelled'] or not question.strip() or question.strip().lower() == 'cancel':
- return "cancel", None, None
-
- # Enrich question
- past = self.history.get_strings()
- if len(past) > 1:
- history_text = "\n".join(f"- {q}" for q in past[-6:-1])
- question = f"Previous questions:\n{history_text}\n\nCurrent Question:\n{question}"
-
- # 3. AI Execution
- active_buffer = get_active_buffer()
- live_text = "Thinking..."
- panel = Panel(live_text, title="[bold cyan]Copilot Guide[/bold cyan]", border_style="cyan")
-
- def on_chunk(text):
- nonlocal live_text
- if live_text == "Thinking...": live_text = ""
- live_text += text
-
- with Live(panel, console=self.console, refresh_per_second=10) as live:
- def update_live(t):
- live.update(Panel(Markdown(t), title="[bold cyan]Copilot Guide[/bold cyan]", border_style="cyan"))
-
- wrapped_chunk = lambda t: (on_chunk(t), update_live(live_text))
-
- # Check for interruption during AI call
- ai_task = asyncio.create_task(on_ai_call(active_buffer, question, wrapped_chunk))
-
try:
- while not ai_task.done():
- await asyncio.sleep(0.05)
- result = await ai_task
- except asyncio.CancelledError:
+ # Usamos un try/finally interno para asegurar que si algo falla en prompt_async,
+ # no nos quedemos con la terminal en un estado extraΓ±o.
+ question = await session.prompt_async(
+ get_prompt_text,
+ key_bindings=bindings,
+ bottom_toolbar=get_toolbar
+ )
+ except (KeyboardInterrupt, EOFError):
+ state['cancelled'] = True
+ question = ""
+
+ if state['cancelled'] or not question.strip() or question.strip().lower() in ['cancel', 'exit', 'quit']:
return "cancel", None, None
- if not result or result.get("error"):
- if result and result.get("error"): self.console.print(f"[red]Error: {result['error']}[/red]")
- return "cancel", None, None
-
- # 4. Handle result
- if live_text == "Thinking..." and result.get("guide"):
- self.console.print(Panel(Markdown(result["guide"]), title="[bold cyan]Copilot Guide[/bold cyan]", border_style="cyan"))
-
- commands = result.get("commands", [])
- if not commands:
- return "cancel", None, None
-
- risk = result.get("risk_level", "low")
- style = {"low": "green", "high": "yellow", "destructive": "red"}.get(risk, "green")
- cmd_text = "\n".join(f" {i+1}. {c}" for i, c in enumerate(commands))
- self.console.print(Panel(cmd_text, title=f"[bold {style}]Suggested Commands [{risk.upper()}][/bold {style}]", border_style=style))
-
- confirm_session = PromptSession()
- c_bindings = KeyBindings()
- @c_bindings.add('escape', eager=True)
- @c_bindings.add('c-c')
- def _(ev): ev.app.exit(result='n')
-
- try:
- action = await confirm_session.prompt_async(HTML(f"Send? (y/n/e/range) [n]: "), key_bindings=c_bindings)
- except (KeyboardInterrupt, EOFError):
- action = "n"
-
- def parse_indices(text, max_len):
- """Helper to parse '1-3, 5, 7' into [0, 1, 2, 4, 6]."""
- indices = []
- # Replace commas with spaces and split
- parts = text.replace(',', ' ').split()
- for part in parts:
- if '-' in part:
- try:
- start, end = map(int, part.split('-'))
- # Ensure inclusive and 0-indexed
- indices.extend(range(start-1, end))
- except: continue
- elif part.isdigit():
- indices.append(int(part)-1)
- # Filter valid indices and remove duplicates
- return [i for i in sorted(set(indices)) if 0 <= i < max_len]
-
- action_l = (action or "n").lower().strip()
- if action_l in ('y', 'yes', 'all'):
- return "send_all", commands, None
-
- # Check for numeric selection (e.g., "1, 2-4")
- if re.match(r'^[0-9,\-\s]+$', action_l):
- selected_idxs = parse_indices(action_l, len(commands))
- if selected_idxs:
- return "send_all", [commands[i] for i in selected_idxs], None
-
- elif action_l.startswith('e'):
- # Check if it's a selective edit like 'e1-2'
- selection_str = action_l[1:].strip()
- if selection_str:
- idxs = parse_indices(selection_str, len(commands))
- cmds_to_edit = [commands[i] for i in idxs] if idxs else commands
- else:
- cmds_to_edit = commands
-
- target = "\n".join(cmds_to_edit)
- e_bindings = KeyBindings()
- @e_bindings.add('c-j')
- def _(ev): ev.app.exit(result=ev.app.current_buffer.text)
- @e_bindings.add('escape', 'enter')
- def _(ev): ev.app.exit(result=ev.app.current_buffer.text)
- @e_bindings.add('escape')
- def _(ev): ev.app.exit(result='')
+ # 3. Process Input via AIService
+ directive = self.ai_service.process_copilot_input(question, self.session_state)
- edited = await confirm_session.prompt_async(
- HTML("Edit (Ctrl+Enter or Esc+Enter to submit):\n"),
- default=target, multiline=True, key_bindings=e_bindings
- )
- if edited.strip():
- # Split by lines to ensure core.py applies delay between each command
- lines = [l.strip() for l in edited.split('\n') if l.strip()]
- return "custom", None, lines
- return "cancel", None, None
+ if directive["action"] == "state_update":
+ state['toolbar_msg'] = directive['message']
+ state['msg_expiry'] = time.time() + 3 # 3 seconds timeout
+
+ async def delayed_refresh():
+ await asyncio.sleep(3.1)
+ # Only invalidate if the message hasn't been replaced by a newer one
+ if state.get('toolbar_msg') == directive['message']:
+ state['toolbar_msg'] = '' # Explicitly clear
+ try:
+ from prompt_toolkit.application.current import get_app
+ app = get_app()
+ if app: app.invalidate()
+ except: pass
+ asyncio.create_task(delayed_refresh())
+
+ # Mover el cursor arriba y limpiar la lΓnea para que el nuevo prompt reemplace al anterior
+ sys.stdout.write('\x1b[1A\x1b[2K')
+ sys.stdout.flush()
+ continue
+ else:
+ # Limpiar el mensaje de la barra cuando se hace una pregunta real
+ state['toolbar_msg'] = ''
+
+ clean_question = directive.get("clean_prompt", question)
+ overrides = directive.get("overrides", {})
+
+ # Merge node_info with session_state and overrides
+ merged_node_info = node_info.copy()
+ if self.session_state['os']: merged_node_info['os'] = self.session_state['os']
+ if self.session_state['prompt']: merged_node_info['prompt'] = self.session_state['prompt']
+ merged_node_info['persona'] = self.session_state['persona']
+ merged_node_info['trust'] = self.session_state['trust_mode']
+ merged_node_info['memories'] = list(self.session_state['memories'])
+
+ for k, v in overrides.items():
+ merged_node_info[k] = v
+
+ # Enrich question
+ past = self.history.get_strings()
+ if len(past) > 1:
+ clean_past = [q for q in past[-6:-1] if not q.startswith('/')]
+ if clean_past:
+ history_text = "\n".join(f"- {q}" for q in clean_past)
+ clean_question = f"Previous questions:\n{history_text}\n\nCurrent Question:\n{clean_question}"
+
+ # 3. AI Execution
+ # Use persona from overrides (one-shot) or from session state
+ active_persona = merged_node_info.get('persona', self.session_state.get('persona', 'engineer'))
+ persona_color = self._get_theme_color(active_persona, fallback="cyan")
+
+ active_buffer = get_active_buffer()
+ live_text = "Thinking..."
+ panel = Panel(live_text, title=f"[bold {persona_color}]Copilot Guide[/bold {persona_color}]", border_style=persona_color)
+
+ def on_chunk(text):
+ nonlocal live_text
+ if live_text == "Thinking...": live_text = ""
+ live_text += text
+
+ with Live(panel, console=self.console, refresh_per_second=10) as live:
+ def update_live(t):
+ live.update(Panel(Markdown(t), title=f"[bold {persona_color}]Copilot Guide[/bold {persona_color}]", border_style=persona_color))
+
+ wrapped_chunk = lambda t: (on_chunk(t), update_live(live_text))
+
+ # Check for interruption during AI call
+ ai_task = asyncio.create_task(on_ai_call(active_buffer, clean_question, wrapped_chunk, merged_node_info))
+
+ try:
+ while not ai_task.done():
+ await asyncio.sleep(0.05)
+ result = await ai_task
+ except asyncio.CancelledError:
+ return "cancel", None, None
+
+ if not result or result.get("error"):
+ if result and result.get("error"): self.console.print(f"[red]Error: {result['error']}[/red]")
+ return "cancel", None, None
+
+ # 4. Handle result
+ if live_text == "Thinking..." and result.get("guide"):
+ self.console.print(Panel(Markdown(result["guide"]), title=f"[bold {persona_color}]Copilot Guide[/bold {persona_color}]", border_style=persona_color))
+
+ commands = result.get("commands", [])
+ if not commands:
+ self.console.print("")
+ return "continue", None, None
+
+ risk = result.get("risk_level", "low")
+ risk_style = {"low": "success", "high": "warning", "destructive": "error"}.get(risk, "success")
+ style_color = self._get_theme_color(risk_style, fallback="green")
+
+ cmd_text = "\n".join(f" {i+1}. {c}" for i, c in enumerate(commands))
+ # Explicitly use 'bold style_color' for both TITLE and BORDER to ensure maximum consistency
+ self.console.print(Panel(cmd_text, title=f"[bold {style_color}]Suggested Commands [{risk.upper()}][/bold {style_color}]", border_style=f"bold {style_color}"))
+
+ if merged_node_info.get('trust', False) and risk != "destructive":
+ self.console.print(f"[dim]βοΈ Auto-executing (Trust Mode)[/dim]")
+ return "send_all", commands, None
+
+ confirm_session = PromptSession(input=self.pt_input, output=self.pt_output)
+ c_bindings = KeyBindings()
+ @c_bindings.add('escape', eager=True)
+ @c_bindings.add('c-c')
+ def _(ev): ev.app.exit(result='n')
+
+ import html
+ try:
+ p_text = html.escape(f"Send? (y/n/e/range) [n]: ")
+ # Use the EXACT same style_color and force bold="true" for Prompt-Toolkit
+ action = await confirm_session.prompt_async(HTML(f''), key_bindings=c_bindings)
+ except (KeyboardInterrupt, EOFError):
+ self.console.print("")
+ return "continue", None, None
+
+ def parse_indices(text, max_len):
+ """Helper to parse '1-3, 5, 7' into [0, 1, 2, 4, 6]."""
+ indices = []
+ # Replace commas with spaces and split
+ parts = text.replace(',', ' ').split()
+ for part in parts:
+ if '-' in part:
+ try:
+ start, end = map(int, part.split('-'))
+ # Ensure inclusive and 0-indexed
+ indices.extend(range(start-1, end))
+ except: continue
+ elif part.isdigit():
+ indices.append(int(part)-1)
+ # Filter valid indices and remove duplicates
+ return [i for i in sorted(set(indices)) if 0 <= i < max_len]
+
+ action_l = (action or "n").lower().strip()
+ if action_l in ('y', 'yes', 'all'):
+ return "send_all", commands, None
+
+ # Check for numeric selection (e.g., "1, 2-4")
+ if re.match(r'^[0-9,\-\s]+$', action_l):
+ selected_idxs = parse_indices(action_l, len(commands))
+ if selected_idxs:
+ return "send_all", [commands[i] for i in selected_idxs], None
+
+ elif action_l.startswith('e'):
+ # Check if it's a selective edit like 'e1-2'
+ selection_str = action_l[1:].strip()
+ if selection_str:
+ idxs = parse_indices(selection_str, len(commands))
+ cmds_to_edit = [commands[i] for i in idxs] if idxs else commands
+ else:
+ cmds_to_edit = commands
+
+ target = "\n".join(cmds_to_edit)
+ e_bindings = KeyBindings()
+ @e_bindings.add('c-j')
+ def _(ev): ev.app.exit(result=ev.app.current_buffer.text)
+ @e_bindings.add('escape', 'enter')
+ def _(ev): ev.app.exit(result=ev.app.current_buffer.text)
+ @e_bindings.add('escape')
+ def _(ev): ev.app.exit(result='')
+
+ c_edit = self._get_theme_color("user_prompt", "cyan")
+ import html
+ e_text = html.escape("Edit (Ctrl+Enter or Esc+Enter to submit):\n")
+ try:
+ edited = await confirm_session.prompt_async(
+ HTML(f''),
+ default=target, multiline=True, key_bindings=e_bindings
+ )
+ except (KeyboardInterrupt, EOFError):
+ self.console.print("")
+ return "continue", None, None
+
+ if edited and edited.strip():
+ # Split by lines to ensure core.py applies delay between each command
+ lines = [l.strip() for l in edited.split('\n') if l.strip()]
+ return "custom", None, lines
+
+ self.console.print("")
+ return "continue", None, None
return "cancel", None, None
finally:
+ state['cancelled'] = True
self.console.print("[dim]Returning to session...[/dim]")
diff --git a/connpy/completion.py b/connpy/completion.py
index 6de9e27..502b63d 100755
--- a/connpy/completion.py
+++ b/connpy/completion.py
@@ -169,11 +169,21 @@ def _build_tree(nodes, folders, profiles, plugins, configdir):
}
# State Machine Definitions
+ mcp_dict = {
+ "list": None,
+ "add": {"*": {"*": {"*": None}}}, # name url [os]
+ "remove": {"*": None},
+ "enable": {"*": None},
+ "disable": {"*": None},
+ "--help": None, "-h": None
+ }
+
ai_dict = {"__exclude_used__": True, "--help": None, "-h": None}
for opt in ["--engineer-model", "--engineer-api-key", "--architect-model", "--architect-api-key"]:
ai_dict[opt] = {"*": ai_dict} # takes value, loops back
for opt in ["--debug", "--trust", "--list", "--list-sessions", "--session", "--resume", "--delete", "--delete-session", "-y"]:
ai_dict[opt] = ai_dict # takes no value, loops back
+ ai_dict["--mcp"] = mcp_dict
ai_dict["*"] = ai_dict
mv_state = {"__extra__": _nodes, "--help": None, "-h": None}
diff --git a/connpy/connapp.py b/connpy/connapp.py
index 25793be..41a3f8d 100755
--- a/connpy/connapp.py
+++ b/connpy/connapp.py
@@ -89,6 +89,10 @@ class connapp:
if hasattr(self.services.nodes, "list_folders") and hasattr(self.services.nodes.list_folders, "register_post_hook"):
self.services.nodes.list_folders.register_post_hook(self.services.context.filter_node_list)
+ # Apply theme from config if exists before remote connection attempts
+ user_theme = self.config.config.get("theme", {})
+ self._apply_app_theme(user_theme)
+
# Populate data via services
try:
self.nodes_list = self.services.nodes.list_nodes()
@@ -151,10 +155,6 @@ class connapp:
return kwargs.get("result")
configfile._saveconfig.register_post_hook(auto_sync_hook)
-
- # Apply theme from config if exists
- user_theme = self.config.config.get("theme", {})
- self._apply_app_theme(user_theme)
def _apply_app_theme(self, styles):
"""Unified method to apply theme to printer and help formatter."""
diff --git a/connpy/core.py b/connpy/core.py
index ae9152a..c4dcad2 100755
--- a/connpy/core.py
+++ b/connpy/core.py
@@ -35,8 +35,6 @@ def copilot_terminal_mode():
new_settings[1] = new_settings[1] | termios.OPOST
termios.tcsetattr(fd, termios.TCSANOW, new_settings)
- yield
- except Exception:
yield
finally:
try:
@@ -610,20 +608,24 @@ class node:
async def handler(buffer, node_info, stream, child_fd, cmd_byte_positions=None):
try:
- interface = CopilotInterface(config, history=getattr(stream, 'copilot_history', None))
+ interface = CopilotInterface(
+ config,
+ history=getattr(stream, 'copilot_history', None),
+ session_state=getattr(stream, 'copilot_state', None)
+ )
# Save history back to stream for persistence in current session
stream.copilot_history = interface.history
+ stream.copilot_state = interface.session_state
ai_service = AIService(config)
- async def on_ai_call(active_buffer, question, chunk_callback):
+ async def on_ai_call(active_buffer, question, chunk_callback, merged_node_info):
return await ai_service.aask_copilot(
- active_buffer,
- question,
- node_info=node_info,
+ active_buffer,
+ question,
+ node_info=merged_node_info,
chunk_callback=chunk_callback
)
-
# Get raw bytes from BytesIO
raw_bytes = self.mylog.getvalue()
@@ -637,12 +639,16 @@ class node:
try:
with copilot_terminal_mode():
- action, commands, custom_cmd = await interface.run_session(
- raw_bytes=raw_bytes,
- cmd_byte_positions=cmd_byte_positions,
- node_info=node_info,
- on_ai_call=on_ai_call
- )
+ while True:
+ action, commands, custom_cmd = await interface.run_session(
+ raw_bytes=raw_bytes,
+ cmd_byte_positions=cmd_byte_positions,
+ node_info=node_info,
+ on_ai_call=on_ai_call
+ )
+ if action == "continue":
+ continue
+ break
finally:
# Reiniciar el lector de la terminal para volver al modo interactivo SSH/Telnet
if hasattr(stream, 'start_reading'):
diff --git a/connpy/grpc_layer/connpy_pb2.py b/connpy/grpc_layer/connpy_pb2.py
index a443c02..2c33451 100644
--- a/connpy/grpc_layer/connpy_pb2.py
+++ b/connpy/grpc_layer/connpy_pb2.py
@@ -26,7 +26,7 @@ from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19\x63onnpy/proto/connpy.proto\x12\x06\x63onnpy\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1bgoogle/protobuf/empty.proto\"\xdc\x01\n\x0fInteractRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04sftp\x18\x02 \x01(\x08\x12\r\n\x05\x64\x65\x62ug\x18\x03 \x01(\x08\x12\x12\n\nstdin_data\x18\x04 \x01(\x0c\x12\x0c\n\x04\x63ols\x18\x05 \x01(\x05\x12\x0c\n\x04rows\x18\x06 \x01(\x05\x12\x1e\n\x16\x63onnection_params_json\x18\x07 \x01(\t\x12\x18\n\x10\x63opilot_question\x18\x08 \x01(\t\x12\x16\n\x0e\x63opilot_action\x18\t \x01(\t\x12\x1e\n\x16\x63opilot_context_buffer\x18\n \x01(\t\"\x86\x02\n\x10InteractResponse\x12\x13\n\x0bstdout_data\x18\x01 \x01(\x0c\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x15\n\rerror_message\x18\x03 \x01(\t\x12\x16\n\x0e\x63opilot_prompt\x18\x04 \x01(\x08\x12\x1e\n\x16\x63opilot_buffer_preview\x18\x05 \x01(\t\x12\x1d\n\x15\x63opilot_response_json\x18\x06 \x01(\t\x12\x1e\n\x16\x63opilot_node_info_json\x18\x07 \x01(\t\x12\x1c\n\x14\x63opilot_stream_chunk\x18\x08 \x01(\t\x12 \n\x18\x63opilot_injected_command\x18\t \x01(\t\"7\n\rFilterRequest\x12\x12\n\nfilter_str\x18\x01 \x01(\t\x12\x12\n\nformat_str\x18\x02 \x01(\t\"5\n\rValueResponse\x12$\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x16.google.protobuf.Value\"\x17\n\tIdRequest\x12\n\n\x02id\x18\x01 \x01(\t\"S\n\x0bNodeRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12%\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x11\n\tis_folder\x18\x03 \x01(\x08\".\n\rDeleteRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\tis_folder\x18\x02 \x01(\x08\"\x1d\n\x0cMessageValue\x12\r\n\x05value\x18\x01 \x01(\t\";\n\x0bMoveRequest\x12\x0e\n\x06src_id\x18\x01 \x01(\t\x12\x0e\n\x06\x64st_id\x18\x02 \x01(\t\x12\x0c\n\x04\x63opy\x18\x03 \x01(\x08\"W\n\x0b\x42ulkRequest\x12\x0b\n\x03ids\x18\x01 \x03(\t\x12\r\n\x05hosts\x18\x02 \x03(\t\x12,\n\x0b\x63ommon_data\x18\x03 
\x01(\x0b\x32\x17.google.protobuf.Struct\"7\n\x0eStructResponse\x12%\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\"/\n\x0eProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07resolve\x18\x02 \x01(\x08\"6\n\rStructRequest\x12%\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\"\x1e\n\rStringRequest\x12\r\n\x05value\x18\x01 \x01(\t\"\x1f\n\x0eStringResponse\x12\r\n\x05value\x18\x01 \x01(\t\"C\n\rUpdateRequest\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value\"B\n\rPluginRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0bsource_file\x18\x02 \x01(\t\x12\x0e\n\x06update\x18\x03 \x01(\x08\"\xa5\x01\n\nRunRequest\x12\r\n\x05nodes\x18\x01 \x03(\t\x12\x10\n\x08\x63ommands\x18\x02 \x03(\t\x12\x0e\n\x06\x66older\x18\x03 \x01(\t\x12\x0e\n\x06prompt\x18\x04 \x01(\t\x12\x10\n\x08parallel\x18\x05 \x01(\x05\x12%\n\x04vars\x18\x06 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x0f\n\x07timeout\x18\x07 \x01(\x05\x12\x0c\n\x04name\x18\x08 \x01(\t\"\xb8\x01\n\x0bTestRequest\x12\r\n\x05nodes\x18\x01 \x03(\t\x12\x10\n\x08\x63ommands\x18\x02 \x03(\t\x12\x10\n\x08\x65xpected\x18\x03 \x03(\t\x12\x0e\n\x06\x66older\x18\x04 \x01(\t\x12\x0e\n\x06prompt\x18\x05 \x01(\t\x12\x10\n\x08parallel\x18\x06 \x01(\x05\x12%\n\x04vars\x18\x07 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x0f\n\x07timeout\x18\x08 \x01(\x05\x12\x0c\n\x04name\x18\t \x01(\t\"A\n\rScriptRequest\x12\x0e\n\x06param1\x18\x01 \x01(\t\x12\x0e\n\x06param2\x18\x02 \x01(\t\x12\x10\n\x08parallel\x18\x03 \x01(\x05\"3\n\rExportRequest\x12\x11\n\tfile_path\x18\x01 \x01(\t\x12\x0f\n\x07\x66olders\x18\x02 \x03(\t\"\x1c\n\x0bListRequest\x12\r\n\x05items\x18\x01 \x03(\t\"\xa6\x02\n\nAskRequest\x12\x12\n\ninput_text\x18\x01 \x01(\t\x12\x0e\n\x06\x64ryrun\x18\x02 \x01(\x08\x12,\n\x0c\x63hat_history\x18\x03 \x01(\x0b\x32\x16.google.protobuf.Value\x12\x12\n\nsession_id\x18\x04 \x01(\t\x12\r\n\x05\x64\x65\x62ug\x18\x05 
\x01(\x08\x12\x16\n\x0e\x65ngineer_model\x18\x06 \x01(\t\x12\x18\n\x10\x65ngineer_api_key\x18\x07 \x01(\t\x12\x17\n\x0f\x61rchitect_model\x18\x08 \x01(\t\x12\x19\n\x11\x61rchitect_api_key\x18\t \x01(\t\x12\r\n\x05trust\x18\n \x01(\x08\x12\x1b\n\x13\x63onfirmation_answer\x18\x0b \x01(\t\x12\x11\n\tinterrupt\x18\x0c \x01(\x08\"\xc8\x01\n\nAIResponse\x12\x12\n\ntext_chunk\x18\x01 \x01(\t\x12\x10\n\x08is_final\x18\x02 \x01(\x08\x12,\n\x0b\x66ull_result\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x15\n\rstatus_update\x18\x04 \x01(\t\x12\x15\n\rdebug_message\x18\x05 \x01(\t\x12\x1d\n\x15requires_confirmation\x18\x06 \x01(\x08\x12\x19\n\x11important_message\x18\x07 \x01(\t\"\x1d\n\x0c\x42oolResponse\x12\r\n\x05value\x18\x01 \x01(\x08\"C\n\x0fProviderRequest\x12\x10\n\x08provider\x18\x01 \x01(\t\x12\r\n\x05model\x18\x02 \x01(\t\x12\x0f\n\x07\x61pi_key\x18\x03 \x01(\t\"\x1b\n\nIntRequest\x12\r\n\x05value\x18\x01 \x01(\x05\"p\n\rNodeRunResult\x12\x11\n\tunique_id\x18\x01 \x01(\t\x12\x0e\n\x06output\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\x05\x12,\n\x0btest_result\x18\x04 \x01(\x0b\x32\x17.google.protobuf.Struct\"m\n\x12\x46ullReplaceRequest\x12,\n\x0b\x63onnections\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12)\n\x08profiles\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"X\n\x0e\x43opilotRequest\x12\x17\n\x0fterminal_buffer\x18\x01 \x01(\t\x12\x15\n\ruser_question\x18\x02 \x01(\t\x12\x16\n\x0enode_info_json\x18\x03 \x01(\t\"U\n\x0f\x43opilotResponse\x12\x10\n\x08\x63ommands\x18\x01 \x03(\t\x12\r\n\x05guide\x18\x02 \x01(\t\x12\x12\n\nrisk_level\x18\x03 \x01(\t\x12\r\n\x05\x65rror\x18\x04 \x01(\t\"a\n\nMCPRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0b\n\x03url\x18\x02 \x01(\t\x12\x0f\n\x07\x65nabled\x18\x03 \x01(\x08\x12\x17\n\x0f\x61uto_load_on_os\x18\x04 \x01(\t\x12\x0e\n\x06remove\x18\x05 
\x01(\x08\x32\xe1\x07\n\x0bNodeService\x12<\n\nlist_nodes\x12\x15.connpy.FilterRequest\x1a\x15.connpy.ValueResponse\"\x00\x12>\n\x0clist_folders\x12\x15.connpy.FilterRequest\x1a\x15.connpy.ValueResponse\"\x00\x12?\n\x10get_node_details\x12\x11.connpy.IdRequest\x1a\x16.connpy.StructResponse\"\x00\x12<\n\x0e\x65xplode_unique\x12\x11.connpy.IdRequest\x1a\x15.connpy.ValueResponse\"\x00\x12\x42\n\x0egenerate_cache\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\"\x00\x12\x39\n\x08\x61\x64\x64_node\x12\x13.connpy.NodeRequest\x1a\x16.google.protobuf.Empty\"\x00\x12<\n\x0bupdate_node\x12\x13.connpy.NodeRequest\x1a\x16.google.protobuf.Empty\"\x00\x12>\n\x0b\x64\x65lete_node\x12\x15.connpy.DeleteRequest\x1a\x16.google.protobuf.Empty\"\x00\x12:\n\tmove_node\x12\x13.connpy.MoveRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x39\n\x08\x62ulk_add\x12\x13.connpy.BulkRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x45\n\x16validate_parent_folder\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\x12set_reserved_names\x12\x13.connpy.ListRequest\x1a\x16.google.protobuf.Empty\"\x00\x12H\n\rinteract_node\x12\x17.connpy.InteractRequest\x1a\x18.connpy.InteractResponse\"\x00(\x01\x30\x01\x12\x44\n\x0c\x66ull_replace\x12\x1a.connpy.FullReplaceRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x45\n\rget_inventory\x12\x16.google.protobuf.Empty\x1a\x1a.connpy.FullReplaceRequest\"\x00\x32\x96\x03\n\x0eProfileService\x12?\n\rlist_profiles\x12\x15.connpy.FilterRequest\x1a\x15.connpy.ValueResponse\"\x00\x12?\n\x0bget_profile\x12\x16.connpy.ProfileRequest\x1a\x16.connpy.StructResponse\"\x00\x12<\n\x0b\x61\x64\x64_profile\x12\x13.connpy.NodeRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x44\n\x11resolve_node_data\x12\x15.connpy.StructRequest\x1a\x16.connpy.StructResponse\"\x00\x12=\n\x0e\x64\x65lete_profile\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x12?\n\x0eupdate_profile\x12\x13.connpy.NodeRequest\x1a\x16.google.protobuf.Empty\"\x00\x32\xae\x03\n\
rConfigService\x12@\n\x0cget_settings\x12\x16.google.protobuf.Empty\x1a\x16.connpy.StructResponse\"\x00\x12\x43\n\x0fget_default_dir\x12\x16.google.protobuf.Empty\x1a\x16.connpy.StringResponse\"\x00\x12\x44\n\x11set_config_folder\x12\x15.connpy.StringRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x41\n\x0eupdate_setting\x12\x15.connpy.UpdateRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\x10\x65ncrypt_password\x12\x15.connpy.StringRequest\x1a\x16.connpy.StringResponse\"\x00\x12H\n\x15\x61pply_theme_from_file\x12\x15.connpy.StringRequest\x1a\x16.connpy.StructResponse\"\x00\x32\xca\x02\n\rPluginService\x12?\n\x0clist_plugins\x12\x16.google.protobuf.Empty\x1a\x15.connpy.ValueResponse\"\x00\x12=\n\nadd_plugin\x12\x15.connpy.PluginRequest\x1a\x16.google.protobuf.Empty\"\x00\x12<\n\rdelete_plugin\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x12<\n\renable_plugin\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x12=\n\x0e\x64isable_plugin\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x32\x9b\x02\n\x10\x45xecutionService\x12=\n\x0crun_commands\x12\x12.connpy.RunRequest\x1a\x15.connpy.NodeRunResult\"\x00\x30\x01\x12?\n\rtest_commands\x12\x13.connpy.TestRequest\x1a\x15.connpy.NodeRunResult\"\x00\x30\x01\x12\x41\n\x0erun_cli_script\x12\x15.connpy.ScriptRequest\x1a\x16.connpy.StructResponse\"\x00\x12\x44\n\x11run_yaml_playbook\x12\x15.connpy.ScriptRequest\x1a\x16.connpy.StructResponse\"\x00\x32\xe2\x01\n\x13ImportExportService\x12\x41\n\x0e\x65xport_to_file\x12\x15.connpy.ExportRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\x10import_from_file\x12\x15.connpy.StringRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\x12set_reserved_names\x12\x13.connpy.ListRequest\x1a\x16.google.protobuf.Empty\"\x00\x32\x8f\x04\n\tAIService\x12\x33\n\x03\x61sk\x12\x12.connpy.AskRequest\x1a\x12.connpy.AIResponse\"\x00(\x01\x30\x01\x12\x38\n\x07\x63onfirm\x12\x15.connpy.StringRequest\x1a\x14.connpy.BoolResponse\"\x00\x12@\n\x0b\x61sk_copil
ot\x12\x16.connpy.CopilotRequest\x1a\x17.connpy.CopilotResponse\"\x00\x12@\n\rlist_sessions\x12\x16.google.protobuf.Empty\x1a\x15.connpy.ValueResponse\"\x00\x12\x41\n\x0e\x64\x65lete_session\x12\x15.connpy.StringRequest\x1a\x16.google.protobuf.Empty\"\x00\x12G\n\x12\x63onfigure_provider\x12\x17.connpy.ProviderRequest\x1a\x16.google.protobuf.Empty\"\x00\x12=\n\rconfigure_mcp\x12\x12.connpy.MCPRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x44\n\x11load_session_data\x12\x15.connpy.StringRequest\x1a\x16.connpy.StructResponse\"\x00\x32\xc2\x02\n\rSystemService\x12\x39\n\tstart_api\x12\x12.connpy.IntRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x39\n\tdebug_api\x12\x12.connpy.IntRequest\x1a\x16.google.protobuf.Empty\"\x00\x12<\n\x08stop_api\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\"\x00\x12;\n\x0brestart_api\x12\x12.connpy.IntRequest\x1a\x16.google.protobuf.Empty\"\x00\x12@\n\x0eget_api_status\x12\x16.google.protobuf.Empty\x1a\x14.connpy.BoolResponse\"\x00\x62\x06proto3')
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19\x63onnpy/proto/connpy.proto\x12\x06\x63onnpy\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1bgoogle/protobuf/empty.proto\"\xfc\x01\n\x0fInteractRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04sftp\x18\x02 \x01(\x08\x12\r\n\x05\x64\x65\x62ug\x18\x03 \x01(\x08\x12\x12\n\nstdin_data\x18\x04 \x01(\x0c\x12\x0c\n\x04\x63ols\x18\x05 \x01(\x05\x12\x0c\n\x04rows\x18\x06 \x01(\x05\x12\x1e\n\x16\x63onnection_params_json\x18\x07 \x01(\t\x12\x18\n\x10\x63opilot_question\x18\x08 \x01(\t\x12\x16\n\x0e\x63opilot_action\x18\t \x01(\t\x12\x1e\n\x16\x63opilot_context_buffer\x18\n \x01(\t\x12\x1e\n\x16\x63opilot_node_info_json\x18\r \x01(\t\"\x86\x02\n\x10InteractResponse\x12\x13\n\x0bstdout_data\x18\x01 \x01(\x0c\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x15\n\rerror_message\x18\x03 \x01(\t\x12\x16\n\x0e\x63opilot_prompt\x18\x04 \x01(\x08\x12\x1e\n\x16\x63opilot_buffer_preview\x18\x05 \x01(\t\x12\x1d\n\x15\x63opilot_response_json\x18\x06 \x01(\t\x12\x1e\n\x16\x63opilot_node_info_json\x18\x07 \x01(\t\x12\x1c\n\x14\x63opilot_stream_chunk\x18\x08 \x01(\t\x12 \n\x18\x63opilot_injected_command\x18\t \x01(\t\"7\n\rFilterRequest\x12\x12\n\nfilter_str\x18\x01 \x01(\t\x12\x12\n\nformat_str\x18\x02 \x01(\t\"5\n\rValueResponse\x12$\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x16.google.protobuf.Value\"\x17\n\tIdRequest\x12\n\n\x02id\x18\x01 \x01(\t\"S\n\x0bNodeRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12%\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x11\n\tis_folder\x18\x03 \x01(\x08\".\n\rDeleteRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\tis_folder\x18\x02 \x01(\x08\"\x1d\n\x0cMessageValue\x12\r\n\x05value\x18\x01 \x01(\t\";\n\x0bMoveRequest\x12\x0e\n\x06src_id\x18\x01 \x01(\t\x12\x0e\n\x06\x64st_id\x18\x02 \x01(\t\x12\x0c\n\x04\x63opy\x18\x03 \x01(\x08\"W\n\x0b\x42ulkRequest\x12\x0b\n\x03ids\x18\x01 \x03(\t\x12\r\n\x05hosts\x18\x02 \x03(\t\x12,\n\x0b\x63ommon_data\x18\x03 
\x01(\x0b\x32\x17.google.protobuf.Struct\"7\n\x0eStructResponse\x12%\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\"/\n\x0eProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07resolve\x18\x02 \x01(\x08\"6\n\rStructRequest\x12%\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\"\x1e\n\rStringRequest\x12\r\n\x05value\x18\x01 \x01(\t\"\x1f\n\x0eStringResponse\x12\r\n\x05value\x18\x01 \x01(\t\"C\n\rUpdateRequest\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value\"B\n\rPluginRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0bsource_file\x18\x02 \x01(\t\x12\x0e\n\x06update\x18\x03 \x01(\x08\"\xa5\x01\n\nRunRequest\x12\r\n\x05nodes\x18\x01 \x03(\t\x12\x10\n\x08\x63ommands\x18\x02 \x03(\t\x12\x0e\n\x06\x66older\x18\x03 \x01(\t\x12\x0e\n\x06prompt\x18\x04 \x01(\t\x12\x10\n\x08parallel\x18\x05 \x01(\x05\x12%\n\x04vars\x18\x06 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x0f\n\x07timeout\x18\x07 \x01(\x05\x12\x0c\n\x04name\x18\x08 \x01(\t\"\xb8\x01\n\x0bTestRequest\x12\r\n\x05nodes\x18\x01 \x03(\t\x12\x10\n\x08\x63ommands\x18\x02 \x03(\t\x12\x10\n\x08\x65xpected\x18\x03 \x03(\t\x12\x0e\n\x06\x66older\x18\x04 \x01(\t\x12\x0e\n\x06prompt\x18\x05 \x01(\t\x12\x10\n\x08parallel\x18\x06 \x01(\x05\x12%\n\x04vars\x18\x07 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x0f\n\x07timeout\x18\x08 \x01(\x05\x12\x0c\n\x04name\x18\t \x01(\t\"A\n\rScriptRequest\x12\x0e\n\x06param1\x18\x01 \x01(\t\x12\x0e\n\x06param2\x18\x02 \x01(\t\x12\x10\n\x08parallel\x18\x03 \x01(\x05\"3\n\rExportRequest\x12\x11\n\tfile_path\x18\x01 \x01(\t\x12\x0f\n\x07\x66olders\x18\x02 \x03(\t\"\x1c\n\x0bListRequest\x12\r\n\x05items\x18\x01 \x03(\t\"\xa6\x02\n\nAskRequest\x12\x12\n\ninput_text\x18\x01 \x01(\t\x12\x0e\n\x06\x64ryrun\x18\x02 \x01(\x08\x12,\n\x0c\x63hat_history\x18\x03 \x01(\x0b\x32\x16.google.protobuf.Value\x12\x12\n\nsession_id\x18\x04 \x01(\t\x12\r\n\x05\x64\x65\x62ug\x18\x05 
\x01(\x08\x12\x16\n\x0e\x65ngineer_model\x18\x06 \x01(\t\x12\x18\n\x10\x65ngineer_api_key\x18\x07 \x01(\t\x12\x17\n\x0f\x61rchitect_model\x18\x08 \x01(\t\x12\x19\n\x11\x61rchitect_api_key\x18\t \x01(\t\x12\r\n\x05trust\x18\n \x01(\x08\x12\x1b\n\x13\x63onfirmation_answer\x18\x0b \x01(\t\x12\x11\n\tinterrupt\x18\x0c \x01(\x08\"\xc8\x01\n\nAIResponse\x12\x12\n\ntext_chunk\x18\x01 \x01(\t\x12\x10\n\x08is_final\x18\x02 \x01(\x08\x12,\n\x0b\x66ull_result\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x15\n\rstatus_update\x18\x04 \x01(\t\x12\x15\n\rdebug_message\x18\x05 \x01(\t\x12\x1d\n\x15requires_confirmation\x18\x06 \x01(\x08\x12\x19\n\x11important_message\x18\x07 \x01(\t\"\x1d\n\x0c\x42oolResponse\x12\r\n\x05value\x18\x01 \x01(\x08\"C\n\x0fProviderRequest\x12\x10\n\x08provider\x18\x01 \x01(\t\x12\r\n\x05model\x18\x02 \x01(\t\x12\x0f\n\x07\x61pi_key\x18\x03 \x01(\t\"\x1b\n\nIntRequest\x12\r\n\x05value\x18\x01 \x01(\x05\"p\n\rNodeRunResult\x12\x11\n\tunique_id\x18\x01 \x01(\t\x12\x0e\n\x06output\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\x05\x12,\n\x0btest_result\x18\x04 \x01(\x0b\x32\x17.google.protobuf.Struct\"m\n\x12\x46ullReplaceRequest\x12,\n\x0b\x63onnections\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12)\n\x08profiles\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"X\n\x0e\x43opilotRequest\x12\x17\n\x0fterminal_buffer\x18\x01 \x01(\t\x12\x15\n\ruser_question\x18\x02 \x01(\t\x12\x16\n\x0enode_info_json\x18\x03 \x01(\t\"U\n\x0f\x43opilotResponse\x12\x10\n\x08\x63ommands\x18\x01 \x03(\t\x12\r\n\x05guide\x18\x02 \x01(\t\x12\x12\n\nrisk_level\x18\x03 \x01(\t\x12\r\n\x05\x65rror\x18\x04 \x01(\t\"a\n\nMCPRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0b\n\x03url\x18\x02 \x01(\t\x12\x0f\n\x07\x65nabled\x18\x03 \x01(\x08\x12\x17\n\x0f\x61uto_load_on_os\x18\x04 \x01(\t\x12\x0e\n\x06remove\x18\x05 
\x01(\x08\x32\xe1\x07\n\x0bNodeService\x12<\n\nlist_nodes\x12\x15.connpy.FilterRequest\x1a\x15.connpy.ValueResponse\"\x00\x12>\n\x0clist_folders\x12\x15.connpy.FilterRequest\x1a\x15.connpy.ValueResponse\"\x00\x12?\n\x10get_node_details\x12\x11.connpy.IdRequest\x1a\x16.connpy.StructResponse\"\x00\x12<\n\x0e\x65xplode_unique\x12\x11.connpy.IdRequest\x1a\x15.connpy.ValueResponse\"\x00\x12\x42\n\x0egenerate_cache\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\"\x00\x12\x39\n\x08\x61\x64\x64_node\x12\x13.connpy.NodeRequest\x1a\x16.google.protobuf.Empty\"\x00\x12<\n\x0bupdate_node\x12\x13.connpy.NodeRequest\x1a\x16.google.protobuf.Empty\"\x00\x12>\n\x0b\x64\x65lete_node\x12\x15.connpy.DeleteRequest\x1a\x16.google.protobuf.Empty\"\x00\x12:\n\tmove_node\x12\x13.connpy.MoveRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x39\n\x08\x62ulk_add\x12\x13.connpy.BulkRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x45\n\x16validate_parent_folder\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\x12set_reserved_names\x12\x13.connpy.ListRequest\x1a\x16.google.protobuf.Empty\"\x00\x12H\n\rinteract_node\x12\x17.connpy.InteractRequest\x1a\x18.connpy.InteractResponse\"\x00(\x01\x30\x01\x12\x44\n\x0c\x66ull_replace\x12\x1a.connpy.FullReplaceRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x45\n\rget_inventory\x12\x16.google.protobuf.Empty\x1a\x1a.connpy.FullReplaceRequest\"\x00\x32\x96\x03\n\x0eProfileService\x12?\n\rlist_profiles\x12\x15.connpy.FilterRequest\x1a\x15.connpy.ValueResponse\"\x00\x12?\n\x0bget_profile\x12\x16.connpy.ProfileRequest\x1a\x16.connpy.StructResponse\"\x00\x12<\n\x0b\x61\x64\x64_profile\x12\x13.connpy.NodeRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x44\n\x11resolve_node_data\x12\x15.connpy.StructRequest\x1a\x16.connpy.StructResponse\"\x00\x12=\n\x0e\x64\x65lete_profile\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x12?\n\x0eupdate_profile\x12\x13.connpy.NodeRequest\x1a\x16.google.protobuf.Empty\"\x00\x32\xae\x03\n\
rConfigService\x12@\n\x0cget_settings\x12\x16.google.protobuf.Empty\x1a\x16.connpy.StructResponse\"\x00\x12\x43\n\x0fget_default_dir\x12\x16.google.protobuf.Empty\x1a\x16.connpy.StringResponse\"\x00\x12\x44\n\x11set_config_folder\x12\x15.connpy.StringRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x41\n\x0eupdate_setting\x12\x15.connpy.UpdateRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\x10\x65ncrypt_password\x12\x15.connpy.StringRequest\x1a\x16.connpy.StringResponse\"\x00\x12H\n\x15\x61pply_theme_from_file\x12\x15.connpy.StringRequest\x1a\x16.connpy.StructResponse\"\x00\x32\xca\x02\n\rPluginService\x12?\n\x0clist_plugins\x12\x16.google.protobuf.Empty\x1a\x15.connpy.ValueResponse\"\x00\x12=\n\nadd_plugin\x12\x15.connpy.PluginRequest\x1a\x16.google.protobuf.Empty\"\x00\x12<\n\rdelete_plugin\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x12<\n\renable_plugin\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x12=\n\x0e\x64isable_plugin\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x32\x9b\x02\n\x10\x45xecutionService\x12=\n\x0crun_commands\x12\x12.connpy.RunRequest\x1a\x15.connpy.NodeRunResult\"\x00\x30\x01\x12?\n\rtest_commands\x12\x13.connpy.TestRequest\x1a\x15.connpy.NodeRunResult\"\x00\x30\x01\x12\x41\n\x0erun_cli_script\x12\x15.connpy.ScriptRequest\x1a\x16.connpy.StructResponse\"\x00\x12\x44\n\x11run_yaml_playbook\x12\x15.connpy.ScriptRequest\x1a\x16.connpy.StructResponse\"\x00\x32\xe2\x01\n\x13ImportExportService\x12\x41\n\x0e\x65xport_to_file\x12\x15.connpy.ExportRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\x10import_from_file\x12\x15.connpy.StringRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\x12set_reserved_names\x12\x13.connpy.ListRequest\x1a\x16.google.protobuf.Empty\"\x00\x32\x8f\x04\n\tAIService\x12\x33\n\x03\x61sk\x12\x12.connpy.AskRequest\x1a\x12.connpy.AIResponse\"\x00(\x01\x30\x01\x12\x38\n\x07\x63onfirm\x12\x15.connpy.StringRequest\x1a\x14.connpy.BoolResponse\"\x00\x12@\n\x0b\x61sk_copil
ot\x12\x16.connpy.CopilotRequest\x1a\x17.connpy.CopilotResponse\"\x00\x12@\n\rlist_sessions\x12\x16.google.protobuf.Empty\x1a\x15.connpy.ValueResponse\"\x00\x12\x41\n\x0e\x64\x65lete_session\x12\x15.connpy.StringRequest\x1a\x16.google.protobuf.Empty\"\x00\x12G\n\x12\x63onfigure_provider\x12\x17.connpy.ProviderRequest\x1a\x16.google.protobuf.Empty\"\x00\x12=\n\rconfigure_mcp\x12\x12.connpy.MCPRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x44\n\x11load_session_data\x12\x15.connpy.StringRequest\x1a\x16.connpy.StructResponse\"\x00\x32\xc2\x02\n\rSystemService\x12\x39\n\tstart_api\x12\x12.connpy.IntRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x39\n\tdebug_api\x12\x12.connpy.IntRequest\x1a\x16.google.protobuf.Empty\"\x00\x12<\n\x08stop_api\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\"\x00\x12;\n\x0brestart_api\x12\x12.connpy.IntRequest\x1a\x16.google.protobuf.Empty\"\x00\x12@\n\x0eget_api_status\x12\x16.google.protobuf.Empty\x1a\x14.connpy.BoolResponse\"\x00\x62\x06proto3')
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
@@ -34,83 +34,83 @@ _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'connpy.proto.connpy_pb2', _
if not _descriptor._USE_C_DESCRIPTORS:
DESCRIPTOR._loaded_options = None
_globals['_INTERACTREQUEST']._serialized_start=97
- _globals['_INTERACTREQUEST']._serialized_end=317
- _globals['_INTERACTRESPONSE']._serialized_start=320
- _globals['_INTERACTRESPONSE']._serialized_end=582
- _globals['_FILTERREQUEST']._serialized_start=584
- _globals['_FILTERREQUEST']._serialized_end=639
- _globals['_VALUERESPONSE']._serialized_start=641
- _globals['_VALUERESPONSE']._serialized_end=694
- _globals['_IDREQUEST']._serialized_start=696
- _globals['_IDREQUEST']._serialized_end=719
- _globals['_NODEREQUEST']._serialized_start=721
- _globals['_NODEREQUEST']._serialized_end=804
- _globals['_DELETEREQUEST']._serialized_start=806
- _globals['_DELETEREQUEST']._serialized_end=852
- _globals['_MESSAGEVALUE']._serialized_start=854
- _globals['_MESSAGEVALUE']._serialized_end=883
- _globals['_MOVEREQUEST']._serialized_start=885
- _globals['_MOVEREQUEST']._serialized_end=944
- _globals['_BULKREQUEST']._serialized_start=946
- _globals['_BULKREQUEST']._serialized_end=1033
- _globals['_STRUCTRESPONSE']._serialized_start=1035
- _globals['_STRUCTRESPONSE']._serialized_end=1090
- _globals['_PROFILEREQUEST']._serialized_start=1092
- _globals['_PROFILEREQUEST']._serialized_end=1139
- _globals['_STRUCTREQUEST']._serialized_start=1141
- _globals['_STRUCTREQUEST']._serialized_end=1195
- _globals['_STRINGREQUEST']._serialized_start=1197
- _globals['_STRINGREQUEST']._serialized_end=1227
- _globals['_STRINGRESPONSE']._serialized_start=1229
- _globals['_STRINGRESPONSE']._serialized_end=1260
- _globals['_UPDATEREQUEST']._serialized_start=1262
- _globals['_UPDATEREQUEST']._serialized_end=1329
- _globals['_PLUGINREQUEST']._serialized_start=1331
- _globals['_PLUGINREQUEST']._serialized_end=1397
- _globals['_RUNREQUEST']._serialized_start=1400
- _globals['_RUNREQUEST']._serialized_end=1565
- _globals['_TESTREQUEST']._serialized_start=1568
- _globals['_TESTREQUEST']._serialized_end=1752
- _globals['_SCRIPTREQUEST']._serialized_start=1754
- _globals['_SCRIPTREQUEST']._serialized_end=1819
- _globals['_EXPORTREQUEST']._serialized_start=1821
- _globals['_EXPORTREQUEST']._serialized_end=1872
- _globals['_LISTREQUEST']._serialized_start=1874
- _globals['_LISTREQUEST']._serialized_end=1902
- _globals['_ASKREQUEST']._serialized_start=1905
- _globals['_ASKREQUEST']._serialized_end=2199
- _globals['_AIRESPONSE']._serialized_start=2202
- _globals['_AIRESPONSE']._serialized_end=2402
- _globals['_BOOLRESPONSE']._serialized_start=2404
- _globals['_BOOLRESPONSE']._serialized_end=2433
- _globals['_PROVIDERREQUEST']._serialized_start=2435
- _globals['_PROVIDERREQUEST']._serialized_end=2502
- _globals['_INTREQUEST']._serialized_start=2504
- _globals['_INTREQUEST']._serialized_end=2531
- _globals['_NODERUNRESULT']._serialized_start=2533
- _globals['_NODERUNRESULT']._serialized_end=2645
- _globals['_FULLREPLACEREQUEST']._serialized_start=2647
- _globals['_FULLREPLACEREQUEST']._serialized_end=2756
- _globals['_COPILOTREQUEST']._serialized_start=2758
- _globals['_COPILOTREQUEST']._serialized_end=2846
- _globals['_COPILOTRESPONSE']._serialized_start=2848
- _globals['_COPILOTRESPONSE']._serialized_end=2933
- _globals['_MCPREQUEST']._serialized_start=2935
- _globals['_MCPREQUEST']._serialized_end=3032
- _globals['_NODESERVICE']._serialized_start=3035
- _globals['_NODESERVICE']._serialized_end=4028
- _globals['_PROFILESERVICE']._serialized_start=4031
- _globals['_PROFILESERVICE']._serialized_end=4437
- _globals['_CONFIGSERVICE']._serialized_start=4440
- _globals['_CONFIGSERVICE']._serialized_end=4870
- _globals['_PLUGINSERVICE']._serialized_start=4873
- _globals['_PLUGINSERVICE']._serialized_end=5203
- _globals['_EXECUTIONSERVICE']._serialized_start=5206
- _globals['_EXECUTIONSERVICE']._serialized_end=5489
- _globals['_IMPORTEXPORTSERVICE']._serialized_start=5492
- _globals['_IMPORTEXPORTSERVICE']._serialized_end=5718
- _globals['_AISERVICE']._serialized_start=5721
- _globals['_AISERVICE']._serialized_end=6248
- _globals['_SYSTEMSERVICE']._serialized_start=6251
- _globals['_SYSTEMSERVICE']._serialized_end=6573
+ _globals['_INTERACTREQUEST']._serialized_end=349
+ _globals['_INTERACTRESPONSE']._serialized_start=352
+ _globals['_INTERACTRESPONSE']._serialized_end=614
+ _globals['_FILTERREQUEST']._serialized_start=616
+ _globals['_FILTERREQUEST']._serialized_end=671
+ _globals['_VALUERESPONSE']._serialized_start=673
+ _globals['_VALUERESPONSE']._serialized_end=726
+ _globals['_IDREQUEST']._serialized_start=728
+ _globals['_IDREQUEST']._serialized_end=751
+ _globals['_NODEREQUEST']._serialized_start=753
+ _globals['_NODEREQUEST']._serialized_end=836
+ _globals['_DELETEREQUEST']._serialized_start=838
+ _globals['_DELETEREQUEST']._serialized_end=884
+ _globals['_MESSAGEVALUE']._serialized_start=886
+ _globals['_MESSAGEVALUE']._serialized_end=915
+ _globals['_MOVEREQUEST']._serialized_start=917
+ _globals['_MOVEREQUEST']._serialized_end=976
+ _globals['_BULKREQUEST']._serialized_start=978
+ _globals['_BULKREQUEST']._serialized_end=1065
+ _globals['_STRUCTRESPONSE']._serialized_start=1067
+ _globals['_STRUCTRESPONSE']._serialized_end=1122
+ _globals['_PROFILEREQUEST']._serialized_start=1124
+ _globals['_PROFILEREQUEST']._serialized_end=1171
+ _globals['_STRUCTREQUEST']._serialized_start=1173
+ _globals['_STRUCTREQUEST']._serialized_end=1227
+ _globals['_STRINGREQUEST']._serialized_start=1229
+ _globals['_STRINGREQUEST']._serialized_end=1259
+ _globals['_STRINGRESPONSE']._serialized_start=1261
+ _globals['_STRINGRESPONSE']._serialized_end=1292
+ _globals['_UPDATEREQUEST']._serialized_start=1294
+ _globals['_UPDATEREQUEST']._serialized_end=1361
+ _globals['_PLUGINREQUEST']._serialized_start=1363
+ _globals['_PLUGINREQUEST']._serialized_end=1429
+ _globals['_RUNREQUEST']._serialized_start=1432
+ _globals['_RUNREQUEST']._serialized_end=1597
+ _globals['_TESTREQUEST']._serialized_start=1600
+ _globals['_TESTREQUEST']._serialized_end=1784
+ _globals['_SCRIPTREQUEST']._serialized_start=1786
+ _globals['_SCRIPTREQUEST']._serialized_end=1851
+ _globals['_EXPORTREQUEST']._serialized_start=1853
+ _globals['_EXPORTREQUEST']._serialized_end=1904
+ _globals['_LISTREQUEST']._serialized_start=1906
+ _globals['_LISTREQUEST']._serialized_end=1934
+ _globals['_ASKREQUEST']._serialized_start=1937
+ _globals['_ASKREQUEST']._serialized_end=2231
+ _globals['_AIRESPONSE']._serialized_start=2234
+ _globals['_AIRESPONSE']._serialized_end=2434
+ _globals['_BOOLRESPONSE']._serialized_start=2436
+ _globals['_BOOLRESPONSE']._serialized_end=2465
+ _globals['_PROVIDERREQUEST']._serialized_start=2467
+ _globals['_PROVIDERREQUEST']._serialized_end=2534
+ _globals['_INTREQUEST']._serialized_start=2536
+ _globals['_INTREQUEST']._serialized_end=2563
+ _globals['_NODERUNRESULT']._serialized_start=2565
+ _globals['_NODERUNRESULT']._serialized_end=2677
+ _globals['_FULLREPLACEREQUEST']._serialized_start=2679
+ _globals['_FULLREPLACEREQUEST']._serialized_end=2788
+ _globals['_COPILOTREQUEST']._serialized_start=2790
+ _globals['_COPILOTREQUEST']._serialized_end=2878
+ _globals['_COPILOTRESPONSE']._serialized_start=2880
+ _globals['_COPILOTRESPONSE']._serialized_end=2965
+ _globals['_MCPREQUEST']._serialized_start=2967
+ _globals['_MCPREQUEST']._serialized_end=3064
+ _globals['_NODESERVICE']._serialized_start=3067
+ _globals['_NODESERVICE']._serialized_end=4060
+ _globals['_PROFILESERVICE']._serialized_start=4063
+ _globals['_PROFILESERVICE']._serialized_end=4469
+ _globals['_CONFIGSERVICE']._serialized_start=4472
+ _globals['_CONFIGSERVICE']._serialized_end=4902
+ _globals['_PLUGINSERVICE']._serialized_start=4905
+ _globals['_PLUGINSERVICE']._serialized_end=5235
+ _globals['_EXECUTIONSERVICE']._serialized_start=5238
+ _globals['_EXECUTIONSERVICE']._serialized_end=5521
+ _globals['_IMPORTEXPORTSERVICE']._serialized_start=5524
+ _globals['_IMPORTEXPORTSERVICE']._serialized_end=5750
+ _globals['_AISERVICE']._serialized_start=5753
+ _globals['_AISERVICE']._serialized_end=6280
+ _globals['_SYSTEMSERVICE']._serialized_start=6283
+ _globals['_SYSTEMSERVICE']._serialized_end=6605
# @@protoc_insertion_point(module_scope)
diff --git a/connpy/grpc_layer/server.py b/connpy/grpc_layer/server.py
index 0761943..e053f95 100644
--- a/connpy/grpc_layer/server.py
+++ b/connpy/grpc_layer/server.py
@@ -223,158 +223,138 @@ class NodeServicer(connpy_pb2_grpc.NodeServiceServicer):
copilot_node_info_json=node_info_json
))
- # 2. Await the question from client via the copilot_queue
- import threading
- def preload_ai_deps():
- try:
- import litellm
- except Exception:
- pass
- threading.Thread(target=preload_ai_deps, daemon=True).start()
-
- try:
- req_data = await asyncio.wait_for(remote_stream.copilot_queue.get(), timeout=120)
- if "question" not in req_data or not req_data["question"] or req_data["question"] == "CANCEL":
- os.write(child_fd, b'\x15\r')
- return
- question = req_data["question"]
-
- context_buffer = req_data.get("context_buffer", "")
- if context_buffer.startswith('{"context_start_pos"'):
+ while True:
+ # 2. Await the question from client via the copilot_queue
+ import threading
+ def preload_ai_deps():
try:
- parsed = json.loads(context_buffer)
- start_pos = parsed["context_start_pos"]
- selected_raw = raw_bytes[start_pos:]
- context_buffer = n._logclean(selected_raw.decode(errors='replace'), var=True)
+ import litellm
except Exception:
- context_buffer = buffer
- elif not context_buffer:
- context_buffer = buffer
- except asyncio.TimeoutError:
- os.write(child_fd, b'\x15\r')
- return
+ pass
+ threading.Thread(target=preload_ai_deps, daemon=True).start()
- # 3. Call AI Service with streaming
- from ..services.ai_service import AIService
- service = AIService(self.service.config)
-
- def chunk_callback(chunk_text):
- if chunk_text:
- response_queue.put(connpy_pb2.InteractResponse(
- copilot_stream_chunk=chunk_text
- ))
-
- # Create a clean version of node_info for the AI to save tokens and match local CLI behavior
- ai_node_info = {k: v for k, v in node_info.items() if k not in ("context_blocks", "full_buffer")}
-
- ai_task = asyncio.create_task(service.aask_copilot(context_buffer, question, ai_node_info, chunk_callback=chunk_callback))
- wait_action_task = asyncio.create_task(remote_stream.copilot_queue.get())
-
- done, pending = await asyncio.wait(
- [ai_task, wait_action_task],
- return_when=asyncio.FIRST_COMPLETED
- )
-
- if wait_action_task in done:
- req_data = wait_action_task.result()
- ai_task.cancel()
- if req_data.get("question") == "CANCEL" or req_data.get("action") == "cancel":
- os.write(child_fd, b'\x15\r')
- return
- return
- else:
- wait_action_task.cancel()
- result = ai_task.result()
- if not result:
- os.write(child_fd, b'\x15\r')
- return
-
- # 4. Send response back to client
- response_queue.put(connpy_pb2.InteractResponse(
- copilot_response_json=json.dumps(result)
- ))
-
- # 5. Wait for user action
- try:
- action_data = await asyncio.wait_for(remote_stream.copilot_queue.get(), timeout=60)
- if "action" not in action_data or not action_data["action"] or action_data["action"] == "cancel":
- os.write(child_fd, b'\x15\r')
- return
- action = action_data["action"]
- except asyncio.TimeoutError:
- os.write(child_fd, b'\x15\r')
- return
-
- if action == "send_all":
- commands = result.get("commands", [])
- os.write(child_fd, b'\x15') # Ctrl+U to clear line
- await asyncio.sleep(0.1)
-
- # Prepend screen length command to avoid pagination
- if "screen_length_command" in n.tags:
- os.write(child_fd, (n.tags["screen_length_command"] + "\n").encode())
- response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=n.tags["screen_length_command"]))
- await asyncio.sleep(0.8)
-
- for cmd in commands:
- os.write(child_fd, (cmd + "\n").encode())
- response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=cmd))
- await asyncio.sleep(0.8)
- elif action.startswith("custom:"):
- custom_cmds = action[7:]
- os.write(child_fd, b'\x15')
- await asyncio.sleep(0.1)
-
- # Prepend screen length command to avoid pagination
- if "screen_length_command" in n.tags:
- os.write(child_fd, (n.tags["screen_length_command"] + "\n").encode())
- response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=n.tags["screen_length_command"]))
- await asyncio.sleep(0.8)
-
- for cmd in custom_cmds.split('\n'):
- if cmd.strip():
- os.write(child_fd, (cmd.strip() + "\n").encode())
- response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=cmd.strip()))
- await asyncio.sleep(0.8)
- elif action not in ('cancel', 'n', 'no'):
- # Handle numbers and ranges like "1,2,4-6"
try:
- commands = result.get("commands", [])
- selected_indices = set()
- for part in action.split(','):
- part = part.strip()
- if not part: continue
- if '-' in part:
- start_str, end_str = part.split('-', 1)
- start = int(start_str) - 1
- end = int(end_str) - 1
- for i in range(start, end + 1):
- selected_indices.add(i)
- else:
- selected_indices.add(int(part) - 1)
-
- valid_indices = sorted([i for i in selected_indices if 0 <= i < len(commands)])
- if valid_indices:
- os.write(child_fd, b'\x15')
- await asyncio.sleep(0.1)
-
- # Prepend screen length command to avoid pagination
- if "screen_length_command" in n.tags:
- os.write(child_fd, (n.tags["screen_length_command"] + "\n").encode())
- response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=n.tags["screen_length_command"]))
- await asyncio.sleep(0.8)
-
- for idx in valid_indices:
- os.write(child_fd, (commands[idx] + "\n").encode())
- response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=commands[idx]))
- await asyncio.sleep(0.8)
- else:
+ req_data = await asyncio.wait_for(remote_stream.copilot_queue.get(), timeout=120)
+ if not req_data: return
+ if "question" not in req_data or not req_data["question"] or req_data["question"] == "CANCEL" or req_data.get("action") == "cancel":
os.write(child_fd, b'\x15\r')
- except (ValueError, IndexError):
+ return
+ question = req_data["question"]
+
+ merged_node_info_str = req_data.get("node_info_json", "")
+ if merged_node_info_str:
+ try:
+ merged_node_info = json.loads(merged_node_info_str)
+ node_info.update(merged_node_info)
+ except: pass
+
+ context_buffer = req_data.get("context_buffer", "")
+ if context_buffer.startswith('{"context_start_pos"'):
+ try:
+ parsed = json.loads(context_buffer)
+ start_pos = parsed["context_start_pos"]
+ selected_raw = raw_bytes[start_pos:]
+ context_buffer = n._logclean(selected_raw.decode(errors='replace'), var=True)
+ except Exception:
+ context_buffer = buffer
+ elif not context_buffer:
+ context_buffer = buffer
+ except asyncio.TimeoutError:
os.write(child_fd, b'\x15\r')
- else:
- # Cancelled or invalid action
- os.write(child_fd, b'\x15\r')
+ return
+
+ # 3. Call AI Service with streaming
+ from ..services.ai_service import AIService
+ service = AIService(self.service.config)
+
+ def chunk_callback(chunk_text):
+ if chunk_text:
+ response_queue.put(connpy_pb2.InteractResponse(
+ copilot_stream_chunk=chunk_text
+ ))
+
+ # Create a clean version of node_info for the AI to save tokens and match local CLI behavior
+ ai_node_info = {k: v for k, v in node_info.items() if k not in ("context_blocks", "full_buffer")}
+
+ ai_task = asyncio.create_task(service.aask_copilot(context_buffer, question, ai_node_info, chunk_callback=chunk_callback))
+ wait_action_task = asyncio.create_task(remote_stream.copilot_queue.get())
+
+ done, pending = await asyncio.wait(
+ [ai_task, wait_action_task],
+ return_when=asyncio.FIRST_COMPLETED
+ )
+
+ if wait_action_task in done:
+ req_data = wait_action_task.result()
+ ai_task.cancel()
+ if req_data.get("action") == "cancel" or req_data.get("question") == "CANCEL":
+ os.write(child_fd, b'\x15\r')
+ return
+ continue # Loop back instead of returning to keep session alive
+ else:
+ wait_action_task.cancel()
+ result = ai_task.result()
+ if not result:
+ os.write(child_fd, b'\x15\r')
+ return
+
+ # 4. Send response back to client
+ response_queue.put(connpy_pb2.InteractResponse(
+ copilot_response_json=json.dumps(result)
+ ))
+
+ # 5. Wait for user action
+ try:
+ action_data = await asyncio.wait_for(remote_stream.copilot_queue.get(), timeout=60)
+ if not action_data: return
+ action = action_data.get("action", "cancel")
+
+ if action == "continue":
+ continue # Loop back for next question
+
+ if action == "cancel":
+ os.write(child_fd, b'\x15\r')
+ return
+ except asyncio.TimeoutError:
+ os.write(child_fd, b'\x15\r')
+ return
+
+ if action == "send_all":
+ commands = result.get("commands", [])
+ os.write(child_fd, b'\x15') # Ctrl+U to clear line
+ await asyncio.sleep(0.1)
+
+ # Prepend screen length command to avoid pagination
+ if "screen_length_command" in n.tags:
+ os.write(child_fd, (n.tags["screen_length_command"] + "\n").encode())
+ response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=n.tags["screen_length_command"]))
+ await asyncio.sleep(0.8)
+
+ for cmd in commands:
+ os.write(child_fd, (cmd + "\n").encode())
+ response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=cmd))
+ await asyncio.sleep(0.8)
+ return
+ elif action.startswith("custom:"):
+ custom_cmds = action[7:]
+ os.write(child_fd, b'\x15')
+ await asyncio.sleep(0.1)
+
+ # Prepend screen length command to avoid pagination
+ if "screen_length_command" in n.tags:
+ os.write(child_fd, (n.tags["screen_length_command"] + "\n").encode())
+ response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=n.tags["screen_length_command"]))
+ await asyncio.sleep(0.8)
+
+ for cmd in custom_cmds.split('\n'):
+ if cmd.strip():
+ os.write(child_fd, (cmd.strip() + "\n").encode())
+ response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=cmd.strip()))
+ await asyncio.sleep(0.8)
+ return
+ else:
+ os.write(child_fd, b'\x15\r')
+ return
asyncio.run(n._async_interact_loop(remote_stream, resize_callback, copilot_handler=remote_copilot_handler))
except Exception as e:
diff --git a/connpy/grpc_layer/stubs.py b/connpy/grpc_layer/stubs.py
index 2db457f..5250c5e 100644
--- a/connpy/grpc_layer/stubs.py
+++ b/connpy/grpc_layer/stubs.py
@@ -51,16 +51,22 @@ class NodeStub:
pause_generator()
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty)
- interface = CopilotInterface(self.config, history=getattr(self, 'copilot_history', None))
+ interface = CopilotInterface(
+ self.config,
+ history=getattr(self, 'copilot_history', None),
+ session_state=getattr(self, 'copilot_state', None)
+ )
self.copilot_history = interface.history
+ self.copilot_state = interface.session_state
node_info = json.loads(res.copilot_node_info_json) if res.copilot_node_info_json else {}
- async def on_ai_call_remote(active_buffer, question, chunk_callback):
+ async def on_ai_call_remote(active_buffer, question, chunk_callback, merged_node_info):
# Send request to server
request_queue.put(connpy_pb2.InteractRequest(
copilot_question=question,
- copilot_context_buffer=active_buffer
+ copilot_context_buffer=active_buffer,
+ copilot_node_info_json=json.dumps(merged_node_info)
))
# Wait for chunks from server
while True:
@@ -76,12 +82,20 @@ class NodeStub:
# Wrap in async loop
async def run_remote_copilot():
- return await interface.run_session(
- raw_bytes=bytes(client_buffer_bytes),
- cmd_byte_positions=cmd_byte_positions,
- node_info=node_info,
- on_ai_call=on_ai_call_remote
- )
+ while True:
+ action, commands, custom_cmd = await interface.run_session(
+ raw_bytes=bytes(client_buffer_bytes),
+ cmd_byte_positions=cmd_byte_positions,
+ node_info=node_info,
+ on_ai_call=on_ai_call_remote
+ )
+
+ if action == "continue":
+ # Send continue signal to server to loop back for another question
+ request_queue.put(connpy_pb2.InteractRequest(copilot_action="continue"))
+ continue
+
+ return action, commands, custom_cmd
with copilot_terminal_mode():
action, commands, custom_cmd = asyncio.run(run_remote_copilot())
diff --git a/connpy/printer.py b/connpy/printer.py
index 501b215..a9bfcaf 100644
--- a/connpy/printer.py
+++ b/connpy/printer.py
@@ -46,8 +46,9 @@ def _get_local():
_local.console = None
if not hasattr(_local, 'err_console'):
_local.err_console = None
- if not hasattr(_local, 'theme'):
- _local.theme = None
+ if not hasattr(_local, 'theme') or _local.theme is None:
+ from rich.theme import Theme
+ _local.theme = Theme(_global_active_styles)
return _local
def set_thread_stream(stream):
@@ -69,23 +70,45 @@ def get_original_stderr():
# Centralized design system
STYLES = {
- "info": "cyan",
- "warning": "yellow",
- "error": "red",
- "success": "green",
- "debug": "dim",
- "header": "bold cyan",
- "key": "bold cyan",
- "border": "cyan",
- "pass": "bold green",
- "fail": "bold red",
- "engineer": "blue",
- "architect": "medium_purple",
- "ai_status": "bold green",
- "user_prompt": "bold cyan",
- "unavailable": "orange3",
+ "info": "#00ffff", # Cyan
+ "warning": "#ffff00", # Yellow
+ "error": "#ff0000", # Red
+ "success": "#00ff00", # Green
+ "debug": "#888888",
+ "header": "bold #00ffff",
+ "key": "bold #00ffff",
+ "border": "#00ffff",
+ "pass": "bold #00ff00",
+ "fail": "bold #ff0000",
+ "engineer": "#5fafff", # Sky Blue (lighter than pure blue)
+ "architect": "#9370db", # Medium Purple
+ "ai_status": "bold #00ff00",
+ "user_prompt": "bold #00afd7", # Deep Sky Blue / Soft Cyan
+ "unavailable": "#d78700",
+ "contrast": "#bbbbbb",
}
+LIGHT_THEME = {
+ "info": "#00008b", # Navy Blue
+ "warning": "#d78700", # Orange
+ "error": "#cd0000", # Dark Red
+ "success": "#006400", # Dark Green
+ "debug": "#777777",
+ "header": "bold #00008b",
+ "key": "bold #00008b",
+ "border": "#00008b",
+ "pass": "bold #006400",
+ "fail": "bold #cd0000",
+ "engineer": "#00008b",
+ "architect": "#8b008b", # Dark Magenta
+ "ai_status": "bold #006400",
+ "user_prompt": "bold #00008b",
+ "unavailable": "#666666",
+ "contrast": "#777777",
+}
+
+_global_active_styles = STYLES.copy()
+
def _get_console():
local = _get_local()
@@ -171,7 +194,7 @@ def connpy_theme():
local = _get_local()
if local.theme is None:
from rich.theme import Theme
- local.theme = Theme(STYLES)
+ local.theme = Theme(_global_active_styles)
return local.theme
def apply_theme(user_styles=None):
@@ -179,6 +202,7 @@ def apply_theme(user_styles=None):
Updates the global console themes with user-defined styles.
If a style is missing in user_styles, it falls back to the default in STYLES.
"""
+ global _global_active_styles
local = _get_local()
from rich.theme import Theme
@@ -190,6 +214,7 @@ def apply_theme(user_styles=None):
if key in active_styles:
active_styles[key] = value
+ _global_active_styles = active_styles
local.theme = Theme(active_styles)
if local.console:
local.console.push_theme(local.theme)
@@ -202,10 +227,15 @@ def _format_multiline(tag, message, style=None):
message = str(message)
lines = message.splitlines()
if not lines:
- return f"[{style}]\\[{tag}][/{style}]" if style else f"\\[{tag}]"
+ if style:
+ return f"[{style}]\\[{tag}][/{style}]"
+ return f"\\[{tag}]"
# Apply style to the tag if provided
styled_tag = f"[{style}]\\[{tag}][/{style}]" if style else f"\\[{tag}]"
+ if style:
+ # Include brackets in the styling
+ styled_tag = f"[{style}]\\[{tag}][/{style}]"
formatted = [f"{styled_tag} {lines[0]}"]
# Indent subsequent lines
@@ -462,7 +492,7 @@ class _ThemeProxy:
local = _get_local()
if local.theme is None:
from rich.theme import Theme
- local.theme = Theme(STYLES)
+ local.theme = Theme(_global_active_styles)
return getattr(local.theme, name)
connpy_theme = _ThemeProxy()
diff --git a/connpy/proto/connpy.proto b/connpy/proto/connpy.proto
index 2c1a0ec..4ea99e3 100644
--- a/connpy/proto/connpy.proto
+++ b/connpy/proto/connpy.proto
@@ -95,6 +95,7 @@ message InteractRequest {
string copilot_question = 8;
string copilot_action = 9;
string copilot_context_buffer = 10;
+ string copilot_node_info_json = 13;
}
message InteractResponse {
diff --git a/connpy/services/ai_service.py b/connpy/services/ai_service.py
index 6d2b86e..b9eccd2 100644
--- a/connpy/services/ai_service.py
+++ b/connpy/services/ai_service.py
@@ -45,6 +45,65 @@ class AIService(BaseService):
blocks.append((pos, preview[:80]))
return blocks
+ def process_copilot_input(self, input_text: str, session_state: dict) -> dict:
+ """Parses slash commands and manages session state. Returns directive dict."""
+ text = input_text.strip()
+ if not text.startswith('/'):
+ return {"action": "execute", "clean_prompt": text, "overrides": {}}
+
+ parts = text.split(maxsplit=1)
+ cmd = parts[0].lower()
+ args = parts[1] if len(parts) > 1 else ""
+
+ # 1. State Commands (Persistent)
+ if cmd == "/os":
+ if args:
+ session_state['os'] = args
+ return {"action": "state_update", "message": f"OS context changed to {args}"}
+ elif cmd == "/prompt":
+ if args:
+ session_state['prompt'] = args
+ return {"action": "state_update", "message": f"Prompt regex changed to {args}"}
+ elif cmd == "/memorize":
+ if args:
+ session_state['memories'].append(args)
+ return {"action": "state_update", "message": f"Memory added: {args}"}
+ elif cmd == "/clear":
+ session_state['memories'] = []
+ return {"action": "state_update", "message": "Memory cleared"}
+
+ # 2. Hybrid Commands
+ elif cmd == "/architect":
+ if not args:
+ session_state['persona'] = 'architect'
+ return {"action": "state_update", "message": "Persona set to Architect"}
+ else:
+ return {"action": "execute", "clean_prompt": args, "overrides": {"persona": "architect"}}
+
+ elif cmd == "/engineer":
+ if not args:
+ session_state['persona'] = 'engineer'
+ return {"action": "state_update", "message": "Persona set to Engineer"}
+ else:
+ return {"action": "execute", "clean_prompt": args, "overrides": {"persona": "engineer"}}
+
+ elif cmd == "/trust":
+ if not args:
+ session_state['trust_mode'] = True
+ return {"action": "state_update", "message": "Auto-execute (trust) enabled for session"}
+ else:
+ return {"action": "execute", "clean_prompt": args, "overrides": {"trust": True}}
+
+ elif cmd == "/untrust":
+ if not args:
+ session_state['trust_mode'] = False
+ return {"action": "state_update", "message": "Auto-execute (trust) disabled for session"}
+ else:
+ return {"action": "execute", "clean_prompt": args, "overrides": {"trust": False}}
+
+ # Unknown command, execute normally
+ return {"action": "execute", "clean_prompt": text, "overrides": {}}
+
def ask(self, input_text, dryrun=False, chat_history=None, status=None, debug=False, session_id=None, console=None, chunk_callback=None, confirm_handler=None, trust=False, **overrides):
"""Send a prompt to the AI agent."""
from connpy.ai import ai
diff --git a/connpy/services/config_service.py b/connpy/services/config_service.py
index 3a63746..5f900bd 100644
--- a/connpy/services/config_service.py
+++ b/connpy/services/config_service.py
@@ -70,6 +70,10 @@ class ConfigService(BaseService):
if not isinstance(user_styles, dict):
raise InvalidConfigurationError("Theme file must be a YAML dictionary.")
+ # Support both direct styles and nested under 'theme' key
+ if "theme" in user_styles and isinstance(user_styles["theme"], dict):
+ user_styles = user_styles["theme"]
+
# Filter for valid styles only (prevent junk in config)
valid_styles = {k: v for k, v in user_styles.items() if k in STYLES}
diff --git a/connpy/tunnels.py b/connpy/tunnels.py
index a0ed142..1043738 100644
--- a/connpy/tunnels.py
+++ b/connpy/tunnels.py
@@ -162,13 +162,19 @@ class RemoteStream:
if req.cols > 0 and req.rows > 0:
if self.resize_callback:
self._loop.call_soon_threadsafe(self.resize_callback, req.rows, req.cols)
+ # Copilot dispatching
+ copilot_msg = {}
if getattr(req, "copilot_question", ""):
- self._loop.call_soon_threadsafe(self.copilot_queue.put_nowait, {
+ copilot_msg.update({
"question": req.copilot_question,
- "context_buffer": getattr(req, "copilot_context_buffer", "")
+ "context_buffer": getattr(req, "copilot_context_buffer", ""),
+ "node_info_json": getattr(req, "copilot_node_info_json", "")
})
if getattr(req, "copilot_action", ""):
- self._loop.call_soon_threadsafe(self.copilot_queue.put_nowait, {"action": req.copilot_action})
+ copilot_msg["action"] = req.copilot_action
+
+ if copilot_msg:
+ self._loop.call_soon_threadsafe(self.copilot_queue.put_nowait, copilot_msg)
if req.stdin_data:
self._loop.call_soon_threadsafe(self._reader_queue.put_nowait, req.stdin_data)
except Exception:
diff --git a/docker-compose.yml b/docker-compose.yml
index 88a622b..a0744ab 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,9 +1,17 @@
-version: "3.8"
services:
connpy-app:
build: .
- image: connpy-app
+ image: connpy:latest
+ container_name: connpy
+    # Essential for terminal interactivity
+ stdin_open: true
+ tty: true
+ environment:
+ - TERM=xterm-256color
+ extra_hosts:
+ - "host.docker.internal:host-gateway"
volumes:
- - ./docker/connpy/:/app
- - ./docker/logs/:/logs
- - ./docker/ssh/:/root/.ssh/
+ - ./docker/config:/config
+ - ./docker/ssh:/root/.ssh
+ - /var/run/docker.sock:/var/run/docker.sock
+    # No default command is defined so that 'docker compose run' feels natural
diff --git a/docker/connpy/.gitignore b/docker/config/.gitignore
similarity index 100%
rename from docker/connpy/.gitignore
rename to docker/config/.gitignore
diff --git a/dockerfile b/dockerfile
index a6baeae..fc4f68b 100644
--- a/dockerfile
+++ b/dockerfile
@@ -1,21 +1,65 @@
-# Use the official python image
+# connpy v6.0.0b8 - Modern Network Automation Environment (Local Build)
+FROM python:3.11-slim
-FROM python:3.11-alpine as connpy-app
+LABEL description="Connpy: AI-Driven Network Automation & Intelligence Platform"
+
+# Terminal and Python configuration
+ENV DEBIAN_FRONTEND=noninteractive \
+ PYTHONUNBUFFERED=1 \
+ TERM=xterm-256color
-# Set the entrypoint
-# Set the working directory
WORKDIR /app
-# Install any additional dependencies
-RUN apk update && apk add --no-cache openssh fzf fzf-tmux ncurses bash
-RUN pip3 install connpy
-RUN connpy config --configfolder /app
+# 1. Base system tools
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ curl \
+ git \
+ openssh-client \
+ fzf \
+ ncurses-bin \
+ bash \
+ procps \
+ unzip \
+ ca-certificates \
+ gnupg \
+ iputils-ping \
+ telnet \
+ && rm -rf /var/lib/apt/lists/*
-#AUTH
-RUN ssh-keygen -A
-RUN mkdir /root/.ssh && \
- chmod 700 /root/.ssh
+# 2. Install the Docker CLI (for connpy's docker plugin)
+RUN install -m 0755 -d /etc/apt/keyrings && \
+ curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg && \
+ echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
+ tee /etc/apt/sources.list.d/docker.list > /dev/null && \
+ apt-get update && apt-get install -y docker-ce-cli && \
+ rm -rf /var/lib/apt/lists/*
+# 3. Install kubectl (for connpy's k8s plugin)
+RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/$(dpkg --print-architecture)/kubectl" && \
+ install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl && \
+ rm kubectl
-#Set the entrypoint
-ENTRYPOINT ["connpy"]
+# 4. Install the AWS CLI and Session Manager Plugin (universal x86_64/ARM64)
+RUN ARCH=$(uname -m) && \
+ if [ "$ARCH" = "x86_64" ]; then AWS_ARCH="x86_64"; else AWS_ARCH="aarch64"; fi && \
+ curl "https://awscli.amazonaws.com/awscli-exe-linux-$AWS_ARCH.zip" -o "awscliv2.zip" && \
+ unzip awscliv2.zip && ./aws/install && rm -rf awscliv2.zip aws/ && \
+ if [ "$ARCH" = "x86_64" ]; then \
+ curl "https://s3.amazonaws.com/session-manager-downloads/plugin/latest/ubuntu_64bit/session-manager-plugin.deb" -o "ssm.deb"; \
+ else \
+ curl "https://s3.amazonaws.com/session-manager-downloads/plugin/latest/ubuntu_arm64/session-manager-plugin.deb" -o "ssm.deb"; \
+ fi && \
+ dpkg -i ssm.deb && rm ssm.deb
+
+# 5. Copy the local source and install dependencies
+COPY . .
+RUN pip install --no-cache-dir --upgrade pip && \
+ pip install --no-cache-dir .
+
+# 6. Persistence configuration
+# Create the folder and the .folder pointer so connpy uses /config
+RUN mkdir -p /config /root/.ssh /root/.config/conn && chmod 700 /root/.ssh && \
+ echo -n "/config" > /root/.config/conn/.folder
+
+# Entrypoint goes straight into connpy
+ENTRYPOINT ["conn"]
diff --git a/docs/connpy/cli/ai_handler.html b/docs/connpy/cli/ai_handler.html
index 1756f97..3f88ec2 100644
--- a/docs/connpy/cli/ai_handler.html
+++ b/docs/connpy/cli/ai_handler.html
@@ -3,7 +3,7 @@
-
+
connpy.cli.ai_handler API documentation
@@ -77,6 +77,9 @@ el.replaceWith(d);
except Exception as e:
printer.error(str(e))
return
+
+ if args.mcp is not None:
+ return self.configure_mcp(args)
# Determinar session_id para retomar
session_id = None
@@ -156,7 +159,7 @@ el.replaceWith(d);
try:
user_query = Prompt.ask("[user_prompt]User[/user_prompt]")
if not user_query.strip(): continue
- if user_query.lower() in ['exit', 'quit', 'bye']: break
+ if user_query.lower() in ['exit', 'quit', 'bye', 'cancel']: break
with console.status("[ai_status]Agent is thinking...") as status:
result = self.app.myai.ask(user_query, chat_history=history, status=status, debug=args.debug, trust=args.trust, **self.ai_overrides)
@@ -179,11 +182,245 @@ el.replaceWith(d);
console.print(f"[debug]Tokens: {u['total']} (Input: {u['input']}, Output: {u['output']})[/debug]")
except (KeyboardInterrupt, EOFError):
console.print("\n[dim]Session closed.[/dim]")
- break
+ break
+
+ def configure_mcp(self, args):
+ """Handle MCP server configuration via CLI tokens or interactive wizard."""
+ mcp_args = args.mcp
+
+ # 1. Non-interactive CLI Mode (if arguments are provided)
+ if mcp_args:
+ action = mcp_args[0].lower()
+
+ if action == "list":
+ settings = self.app.services.config_svc.get_settings()
+ mcp_servers = settings.get("ai", {}).get("mcp_servers", {})
+ if not mcp_servers:
+ printer.info("No MCP servers configured.")
+ else:
+ columns = ["Name", "URL", "Enabled", "Auto-load OS"]
+ rows = []
+ for name, cfg in mcp_servers.items():
+ rows.append([
+ name,
+ cfg.get("url", ""),
+ "[green]Yes[/green]" if cfg.get("enabled", True) else "[red]No[/red]",
+ cfg.get("auto_load_on_os", "Any")
+ ])
+ printer.table("Configured MCP Servers", columns, rows)
+ return
+
+ elif action == "add":
+ if len(mcp_args) < 3:
+ printer.error("Usage: connpy ai --mcp add <name> <url> [os_filter]")
+ return
+ name, url = mcp_args[1], mcp_args[2]
+ os_filter = mcp_args[3] if len(mcp_args) > 3 else None
+ try:
+ self.app.services.ai.configure_mcp(name, url=url, auto_load_on_os=os_filter)
+ printer.success(f"MCP server '{name}' added/updated.")
+ except Exception as e:
+ printer.error(str(e))
+ return
+
+ elif action == "remove":
+ if len(mcp_args) < 2:
+ printer.error("Usage: connpy ai --mcp remove <name>")
+ return
+ name = mcp_args[1]
+ try:
+ self.app.services.ai.configure_mcp(name, remove=True)
+ printer.success(f"MCP server '{name}' removed.")
+ except Exception as e:
+ printer.error(str(e))
+ return
+
+ elif action in ["enable", "disable"]:
+ if len(mcp_args) < 2:
+ printer.error(f"Usage: connpy ai --mcp {action} <name>")
+ return
+ name = mcp_args[1]
+ enabled = (action == "enable")
+ try:
+ self.app.services.ai.configure_mcp(name, enabled=enabled)
+ printer.success(f"MCP server '{name}' {'enabled' if enabled else 'disabled'}.")
+ except Exception as e:
+ printer.error(str(e))
+ return
+
+ else:
+ printer.error(f"Unknown MCP action: {action}")
+ printer.info("Available actions: list, add, remove, enable, disable")
+ return
+
+ # 2. Interactive Wizard Mode (if no arguments provided)
+ # Import forms dynamically to avoid circular dependencies if any
+ if not hasattr(self.app, "cli_forms"):
+ from .forms import Forms
+ self.app.cli_forms = Forms(self.app)
+
+ settings = self.app.services.config_svc.get_settings()
+ mcp_servers = settings.get("ai", {}).get("mcp_servers", {})
+
+ result = self.app.cli_forms.mcp_wizard(mcp_servers)
+ if not result:
+ return
+
+ action = result["action"]
+ try:
+ if action == "list":
+ # Recursive call to the non-interactive list logic
+ args.mcp = ["list"]
+ return self.configure_mcp(args)
+
+ elif action == "add":
+ self.app.services.ai.configure_mcp(
+ result["name"],
+ url=result["url"],
+ enabled=result["enabled"],
+ auto_load_on_os=result["os"]
+ )
+ printer.success(f"MCP server '{result['name']}' saved.")
+
+ elif action == "update": # Used for toggle
+ self.app.services.ai.configure_mcp(
+ result["name"],
+ enabled=result["enabled"]
+ )
+ printer.success(f"MCP server '{result['name']}' updated.")
+
+ elif action == "remove":
+ self.app.services.ai.configure_mcp(result["name"], remove=True)
+ printer.success(f"MCP server '{result['name']}' removed.")
+
+ except Exception as e:
+ printer.error(str(e))
Methods
+
+def configure_mcp(self, args)
+
+
+
+
+Expand source code
+
+
def configure_mcp(self, args):
+ """Handle MCP server configuration via CLI tokens or interactive wizard."""
+ mcp_args = args.mcp
+
+ # 1. Non-interactive CLI Mode (if arguments are provided)
+ if mcp_args:
+ action = mcp_args[0].lower()
+
+ if action == "list":
+ settings = self.app.services.config_svc.get_settings()
+ mcp_servers = settings.get("ai", {}).get("mcp_servers", {})
+ if not mcp_servers:
+ printer.info("No MCP servers configured.")
+ else:
+ columns = ["Name", "URL", "Enabled", "Auto-load OS"]
+ rows = []
+ for name, cfg in mcp_servers.items():
+ rows.append([
+ name,
+ cfg.get("url", ""),
+ "[green]Yes[/green]" if cfg.get("enabled", True) else "[red]No[/red]",
+ cfg.get("auto_load_on_os", "Any")
+ ])
+ printer.table("Configured MCP Servers", columns, rows)
+ return
+
+ elif action == "add":
+ if len(mcp_args) < 3:
+ printer.error("Usage: connpy ai --mcp add <name> <url> [os_filter]")
+ return
+ name, url = mcp_args[1], mcp_args[2]
+ os_filter = mcp_args[3] if len(mcp_args) > 3 else None
+ try:
+ self.app.services.ai.configure_mcp(name, url=url, auto_load_on_os=os_filter)
+ printer.success(f"MCP server '{name}' added/updated.")
+ except Exception as e:
+ printer.error(str(e))
+ return
+
+ elif action == "remove":
+ if len(mcp_args) < 2:
+ printer.error("Usage: connpy ai --mcp remove <name>")
+ return
+ name = mcp_args[1]
+ try:
+ self.app.services.ai.configure_mcp(name, remove=True)
+ printer.success(f"MCP server '{name}' removed.")
+ except Exception as e:
+ printer.error(str(e))
+ return
+
+ elif action in ["enable", "disable"]:
+ if len(mcp_args) < 2:
+ printer.error(f"Usage: connpy ai --mcp {action} <name>")
+ return
+ name = mcp_args[1]
+ enabled = (action == "enable")
+ try:
+ self.app.services.ai.configure_mcp(name, enabled=enabled)
+ printer.success(f"MCP server '{name}' {'enabled' if enabled else 'disabled'}.")
+ except Exception as e:
+ printer.error(str(e))
+ return
+
+ else:
+ printer.error(f"Unknown MCP action: {action}")
+ printer.info("Available actions: list, add, remove, enable, disable")
+ return
+
+ # 2. Interactive Wizard Mode (if no arguments provided)
+ # Import forms dynamically to avoid circular dependencies if any
+ if not hasattr(self.app, "cli_forms"):
+ from .forms import Forms
+ self.app.cli_forms = Forms(self.app)
+
+ settings = self.app.services.config_svc.get_settings()
+ mcp_servers = settings.get("ai", {}).get("mcp_servers", {})
+
+ result = self.app.cli_forms.mcp_wizard(mcp_servers)
+ if not result:
+ return
+
+ action = result["action"]
+ try:
+ if action == "list":
+ # Recursive call to the non-interactive list logic
+ args.mcp = ["list"]
+ return self.configure_mcp(args)
+
+ elif action == "add":
+ self.app.services.ai.configure_mcp(
+ result["name"],
+ url=result["url"],
+ enabled=result["enabled"],
+ auto_load_on_os=result["os"]
+ )
+ printer.success(f"MCP server '{result['name']}' saved.")
+
+ elif action == "update": # Used for toggle
+ self.app.services.ai.configure_mcp(
+ result["name"],
+ enabled=result["enabled"]
+ )
+ printer.success(f"MCP server '{result['name']}' updated.")
+
+ elif action == "remove":
+ self.app.services.ai.configure_mcp(result["name"], remove=True)
+ printer.success(f"MCP server '{result['name']}' removed.")
+
+ except Exception as e:
+ printer.error(str(e))
+
+
Handle MCP server configuration via CLI tokens or interactive wizard.
+
def dispatch(self, args)
@@ -210,6 +447,9 @@ el.replaceWith(d);
except Exception as e:
printer.error(str(e))
return
+
+ if args.mcp is not None:
+ return self.configure_mcp(args)
# Determinar session_id para retomar
session_id = None
@@ -283,7 +523,7 @@ el.replaceWith(d);
try:
user_query = Prompt.ask("[user_prompt]User[/user_prompt]")
if not user_query.strip(): continue
- if user_query.lower() in ['exit', 'quit', 'bye']: break
+ if user_query.lower() in ['exit', 'quit', 'bye', 'cancel']: break
with console.status("[ai_status]Agent is thinking...") as status:
result = self.app.myai.ask(user_query, chat_history=history, status=status, debug=args.debug, trust=args.trust, **self.ai_overrides)
@@ -356,6 +596,7 @@ el.replaceWith(d);