From 12543c683ec2290d8e254b3390f04df60a1c9a87 Mon Sep 17 00:00:00 2001 From: Fede Luzzi Date: Wed, 13 May 2026 14:16:14 -0300 Subject: [PATCH] 1. Persistence Setup: Optimized the dockerfile to manually create the /root/.config/conn/.folder file pointing to /config. This avoids running the conn command during the build process and ensures a cleaner setup. 2. Copilot UI Fix: Resolved a double-escaping bug in the terminal bottom bar. Device prompts (like 6WIND-PE1>) will now render correctly instead of showing HTML entities like &gt;. 3. AI Model Update: Updated the default engineer model in connpy/ai.py to gemini/gemini-3.1-flash-lite, removing the deprecated -preview suffix. 4. Standardized Timeouts: Unified all default timeouts to 20 seconds across the board. This includes direct execution (run/test), modern playbooks (v2), and classic task-based playbooks (v1). 5. Documentation Update: Regenerated the full documentation site in the docs/ directory using pdoc to reflect the latest changes. 6. Cleanup: Removed all debug prints from connpy/core.py and restored the docker/logs/.gitignore file. 
--- .dockerignore | 22 + .gitignore | 4 + MANIFEST.in | 8 + README.md | 595 ++---- connpy/__init__.py | 572 ++---- connpy/_version.py | 2 +- connpy/ai.py | 43 +- connpy/cli/run_handler.py | 4 +- connpy/cli/terminal_ui.py | 450 +++-- connpy/completion.py | 10 + connpy/connapp.py | 8 +- connpy/core.py | 34 +- connpy/grpc_layer/connpy_pb2.py | 160 +- connpy/grpc_layer/server.py | 272 ++- connpy/grpc_layer/stubs.py | 32 +- connpy/printer.py | 70 +- connpy/proto/connpy.proto | 1 + connpy/services/ai_service.py | 59 + connpy/services/config_service.py | 4 + connpy/tunnels.py | 12 +- docker-compose.yml | 18 +- docker/{connpy => config}/.gitignore | 0 dockerfile | 72 +- docs/connpy/cli/ai_handler.html | 251 ++- docs/connpy/cli/api_handler.html | 4 +- docs/connpy/cli/config_handler.html | 4 +- docs/connpy/cli/context_handler.html | 4 +- docs/connpy/cli/forms.html | 179 +- docs/connpy/cli/help_text.html | 4 +- docs/connpy/cli/helpers.html | 4 +- docs/connpy/cli/import_export_handler.html | 4 +- docs/connpy/cli/index.html | 9 +- docs/connpy/cli/node_handler.html | 4 +- docs/connpy/cli/plugin_handler.html | 4 +- docs/connpy/cli/profile_handler.html | 4 +- docs/connpy/cli/run_handler.html | 12 +- docs/connpy/cli/sync_handler.html | 4 +- docs/connpy/cli/terminal_ui.html | 899 +++++++++ docs/connpy/cli/validators.html | 4 +- docs/connpy/grpc_layer/connpy_pb2.html | 137 +- docs/connpy/grpc_layer/connpy_pb2_grpc.html | 720 +++++--- docs/connpy/grpc_layer/index.html | 4 +- docs/connpy/grpc_layer/remote_plugin_pb2.html | 12 +- .../grpc_layer/remote_plugin_pb2_grpc.html | 4 +- docs/connpy/grpc_layer/server.html | 256 ++- docs/connpy/grpc_layer/stubs.html | 656 ++++++- docs/connpy/grpc_layer/utils.html | 4 +- docs/connpy/index.html | 1606 +++++++++++------ docs/connpy/mcp_client.html | 349 ++++ docs/connpy/proto/index.html | 4 +- docs/connpy/services/ai_service.html | 355 +++- docs/connpy/services/base.html | 4 +- docs/connpy/services/config_service.html | 12 +- 
docs/connpy/services/context_service.html | 4 +- docs/connpy/services/exceptions.html | 4 +- docs/connpy/services/execution_service.html | 20 +- .../services/import_export_service.html | 4 +- docs/connpy/services/index.html | 489 ++++- docs/connpy/services/node_service.html | 4 +- docs/connpy/services/plugin_service.html | 114 +- docs/connpy/services/profile_service.html | 4 +- docs/connpy/services/provider.html | 4 +- docs/connpy/services/sync_service.html | 4 +- docs/connpy/services/system_service.html | 4 +- docs/connpy/tests/conftest.html | 4 +- docs/connpy/tests/index.html | 9 +- docs/connpy/tests/test_ai.html | 4 +- docs/connpy/tests/test_ai_copilot.html | 315 ++++ docs/connpy/tests/test_capture.html | 4 +- docs/connpy/tests/test_completion.html | 4 +- docs/connpy/tests/test_configfile.html | 4 +- docs/connpy/tests/test_connapp.html | 4 +- docs/connpy/tests/test_core.html | 4 +- docs/connpy/tests/test_execution_service.html | 4 +- docs/connpy/tests/test_grpc_layer.html | 20 +- docs/connpy/tests/test_hooks.html | 4 +- docs/connpy/tests/test_node_service.html | 4 +- docs/connpy/tests/test_plugins.html | 4 +- docs/connpy/tests/test_printer.html | 4 +- .../tests/test_printer_concurrency.html | 4 +- docs/connpy/tests/test_profile_service.html | 4 +- docs/connpy/tests/test_provider.html | 4 +- docs/connpy/tests/test_sync.html | 4 +- docs/connpy/tunnels.html | 91 +- docs/connpy/utils.html | 130 ++ requirements.txt | 5 + setup.cfg | 9 + 87 files changed, 6715 insertions(+), 2552 deletions(-) create mode 100644 .dockerignore create mode 100644 MANIFEST.in rename docker/{connpy => config}/.gitignore (100%) create mode 100644 docs/connpy/cli/terminal_ui.html create mode 100644 docs/connpy/mcp_client.html create mode 100644 docs/connpy/tests/test_ai_copilot.html create mode 100644 docs/connpy/utils.html diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..7ae3f2b --- /dev/null +++ b/.dockerignore @@ -0,0 +1,22 @@ +.git +__pycache__ +*.pyc +*.pyo 
+*.pyd +.pytest_cache +.venv +venv +env +node_modules +dist +build +*.egg-info +docker +docker-compose.yml +.gemini +.github +docs +scratch +testall +testremote +automation-template.yaml diff --git a/.gitignore b/.gitignore index dc4eee6..5456202 100644 --- a/.gitignore +++ b/.gitignore @@ -164,3 +164,7 @@ connpy_roadmap.md MULTI_USER_PLAN.md COPILOT_PLAN.md ARCHITECTURAL_DEBT_REFACTOR.md + +#themes +nord.yml +theme.py diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..4a319b4 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,8 @@ +include LICENSE +include README.md +include requirements.txt +recursive-include connpy/core_plugins * +recursive-include connpy/proto * +recursive-include connpy/grpc *.proto +recursive-exclude * __pycache__ +recursive-exclude * *.py[co] diff --git a/README.md b/README.md index d5c32af..3439f89 100644 --- a/README.md +++ b/README.md @@ -9,519 +9,182 @@ [![](https://img.shields.io/pypi/l/connpy.svg?style=flat-square)](https://github.com/fluzzi/connpy/blob/main/LICENSE) [![](https://img.shields.io/pypi/dm/connpy.svg?style=flat-square)](https://pypi.org/pypi/connpy/) -Connpy is a SSH, SFTP, Telnet, kubectl, Docker pod, and AWS SSM connection manager and automation module for Linux, Mac, and Docker. +**Connpy** is a powerful Connection Manager and Network Automation Platform for Linux, Mac, and Docker. It provides a unified interface for **SSH, SFTP, Telnet, kubectl, Docker pods, and AWS SSM**. + +The v6 release introduces the **AI Copilot**, an interactive terminal assistant that understands your network context and helps you manage your infrastructure more intelligently. + + +## πŸ€– AI Copilot (New in v6) +The AI Copilot is deeply integrated into your terminal workflow: +- **Terminal Context Awareness**: The Copilot can "see" your screen output, helping you diagnose errors or analyze command results in real-time. 
+- **Hybrid Multi-Agent System**: Automatically escalates complex tasks between the **Network Engineer** (execution) and the **Network Architect** (strategy). +- **MCP Integration**: Dynamically load tools from external providers (6WIND, AWS, etc.) via the Model Context Protocol. +- **Interactive Chat**: Launch with `conn ai` for a collaborative troubleshooting session. + + +## Core Features +- **Multi-Protocol**: Native support for SSH, SFTP, Telnet, kubectl, Docker exec, and AWS SSM. +- **Context Management**: Set regex-based contexts to manage specific nodes across different environments (work, home, clients). +- **Advanced Inventory**: + - Organize nodes in folders (`@folder`) and subfolders (`@subfolder@folder`). + - Use Global Profiles (`@profilename`) to manage shared credentials easily. + - Bulk creation, copying, moving, and export/import of nodes. +- **Modern UI**: High-performance terminal experience with `prompt-toolkit`, including: + - Fuzzy search integration with `fzf`. + - Advanced tab completion. + - Syntax highlighting and customizable themes. +- **Automation Engine**: Run parallel tasks and playbooks on multiple devices with variable support. +- **Plugin System**: Build and execute custom Python scripts locally or on a remote gRPC server. +- **gRPC Architecture**: Fully decoupled Client/Server model for distributed management. +- **Privacy & Sync**: Local-first encrypted storage (RSA/OAEP) with optional Google Drive backup. 
## Installation +```bash pip install connpy - -### Run it in Windows using docker ``` + +### Run it in Windows/Linux using Docker +```bash git clone https://github.com/fluzzi/connpy -docker compose -f path/to/folder/docker-compose.yml build -docker compose -f path/to/folder/docker-compose.yml run -it connpy-app +cd connpy +docker compose build + +# Run it like a native app (completely silent) +docker compose --log-level ERROR run --rm --remove-orphans connpy-app [command] + +# Pro Tip: Add this alias for a 100% native experience from any folder +alias conn='docker compose -f /path/to/connpy/docker-compose.yml --log-level ERROR run --rm --remove-orphans connpy-app' ``` -## Connection manager +--- + +## πŸ”’ Privacy & Integration + ### Privacy Policy - -Connpy is committed to protecting your privacy. Our privacy policy explains how we handle user data: - -- **Data Access**: Connpy accesses data necessary for managing remote host connections, including server addresses, usernames, and passwords. This data is stored locally on your machine and is not transmitted or shared with any third parties. -- **Data Usage**: User data is used solely for the purpose of managing and automating SSH, Telnet, and SSM connections. -- **Data Storage**: All connection details are stored locally and securely on your device. We do not store or process this data on our servers. -- **Data Sharing**: We do not share any user data with third parties. +Connpy is committed to protecting your privacy: +- **Local Storage**: All server addresses, usernames, and passwords are encrypted and stored **only** on your machine. No data is transmitted to our servers. +- **Data Access**: Data is used solely for managing and automating your connections. ### Google Integration +Used strictly for backup: +- **Backup**: Sync your encrypted configuration with your Google Drive account. +- **Scoped Access**: Connpy only accesses its own backup files. 
-Connpy integrates with Google services for backup purposes: +--- -- **Configuration Backup**: The app allows users to store their device information in the app configuration. This configuration can be synced with Google services to create backups. -- **Data Access**: Connpy only accesses its own files and does not access any other files on your Google account. -- **Data Usage**: The data is used solely for backup and restore purposes, ensuring that your device information and configurations are safe and recoverable. -- **Data Sharing**: Connpy does not share any user data with third parties, including Google. The backup data is only accessible by the user. +## Usage -For more detailed information, please read our [Privacy Policy](https://connpy.gederico.dynu.net/fluzzi32/connpy/src/branch/main/PRIVATE_POLICY.md). - - -### Features - - Manage connections using SSH, SFTP, Telnet, kubectl, Docker exec, and AWS SSM. - - Set contexts to manage specific nodes from specific contexts (work/home/clients/etc). - - You can generate profiles and reference them from nodes using @profilename so you don't - need to edit multiple nodes when changing passwords or other information. - - Nodes can be stored on @folder or @subfolder@folder to organize your devices. They can - be referenced using node@subfolder@folder or node@folder. - - If you have too many nodes, get a completion script using: conn config --completion. - Or use fzf by installing pyfzf and running conn config --fzf true. - - Create in bulk, copy, move, export, and import nodes for easy management. - - Run automation scripts on network devices. - - Use AI with a multi-agent system (Engineer/Architect) to manage devices. - Supports any LLM provider via litellm (OpenAI, Anthropic, Google, etc.). - Features streaming responses, interactive chat, and extensible plugin tools. - - Add plugins with your own scripts, and execute them remotely. - - Fully decoupled gRPC Client/Server architecture. 
- - Unified UI with syntax highlighting and theming. - - Much more! - -### Usage: -``` +```bash usage: conn [-h] [--add | --del | --mod | --show | --debug] [node|folder] [--sftp] - conn {profile,move,mv,copy,cp,list,ls,bulk,export,import,ai,run,api,plugin,config,sync,context} ... - -positional arguments: - node|folder node[@subfolder][@folder] - Connect to specific node or show all matching nodes - [@subfolder][@folder] - Show all available connections globally or in specified path - -options: - -h, --help show this help message and exit - -v, --version Show version - -a, --add Add new node[@subfolder][@folder] or [@subfolder]@folder - -r, --del, --rm Delete node[@subfolder][@folder] or [@subfolder]@folder - -e, --mod, --edit Modify node[@subfolder][@folder] - -s, --show Show node[@subfolder][@folder] - -d, --debug Display all conections steps - -t, --sftp Connects using sftp instead of ssh - --service-mode Set the backend service mode (local or remote) - --remote Connect to a remote connpy service via gRPC - --theme UI Output theme (dark, light, or path) - -Commands: - profile Manage profiles - move(mv) Move node - copy(cp) Copy node - list(ls) List profiles, nodes or folders - bulk Add nodes in bulk - export Export connection folder to Yaml file - import Import connection folder to config from Yaml file - ai Make request to an AI - run Run scripts or commands on nodes - api Start and stop connpy api - plugin Manage plugins - config Manage app config - sync Sync config with Google - context Manage contexts with regex matching + conn {profile,move,copy,list,bulk,export,import,ai,run,api,plugin,config,sync,context} ... 
``` -### Manage profiles: -``` -usage: conn profile [-h] (--add | --del | --mod | --show) profile +### Basic Examples: +```bash +# Add a folder and subfolder +conn --add @office +conn --add @datacenter@office -positional arguments: - profile Name of profile to manage +# Add a node with a profile +conn --add server1@datacenter@office --profile @myuser -options: - -h, --help show this help message and exit - -a, --add Add new profile - -r, --del, --rm Delete profile - -e, --mod, --edit Modify profile - -s, --show Show profile +# Connect to a node (fuzzy match) +conn server1 +# Start the AI Copilot +conn ai + +# Run a command on all nodes in a folder +conn run @office "uptime" ``` -### Examples: -``` - #Add new profile - conn profile --add office-user - #Add new folder - conn --add @office - #Add new subfolder - conn --add @datacenter@office - #Add node to subfolder - conn --add server@datacenter@office - #Add node to folder - conn --add pc@office - #Show node information - conn --show server@datacenter@office - #Connect to nodes - conn pc@office - conn server - #Create and set new context - conn context -a office .*@office - conn context --set office - #Run a command in a node - conn run server ls -la -``` +--- + +## πŸ”Œ Plugin System +Connpy supports a robust plugin architecture where scripts can run transparently on a remote gRPC server. + +### Structure +Plugins must be Python files containing: +- **Class `Parser`**: Defines `argparse` arguments. +- **Class `Entrypoint`**: Execution logic. +- **Class `Preload`**: (Optional) Hooks and modifications to the core app. + +See the [Plugin Requirements section](#plugin-requirements-for-connpy) for full technical details. 
+ +--- + ## Plugin Requirements for Connpy ### Remote Plugin Execution When Connpy operates in remote mode, plugins are executed **transparently on the server**: - The client automatically downloads the plugin source code (`Parser` class context) to generate the local `argparse` structure and provide autocompletion. -- The execution phase (`Entrypoint` class) is redirected via gRPC streams to execute in the server's memory, ensuring the plugin runs securely against the server's inventory without passing sensitive data to the client. -- You can manage remote plugins using the `--remote` flag (e.g. `connpy plugin --add myplugin script.py --remote`). +- The execution phase (`Entrypoint` class) is redirected via gRPC streams to execute in the server's memory. +- You can manage remote plugins using the `--remote` flag. ### General Structure -- The plugin script must be a Python file. -- Only the following top-level elements are allowed in the plugin script: - - Class definitions - - Function definitions - - Import statements - - The `if __name__ == "__main__":` block for standalone execution - - Pass statements - -### Specific Class Requirements -- The plugin script must define specific classes with particular attributes and methods. Each class serves a distinct role within the plugin's architecture: - 1. **Class `Parser`**: - - **Purpose**: Handles parsing of command-line arguments. - - **Requirements**: - - Must contain only one method: `__init__`. - - The `__init__` method must initialize at least one attribute: - - `self.parser`: An instance of `argparse.ArgumentParser`. - 2. **Class `Entrypoint`**: - - **Purpose**: Acts as the entry point for plugin execution, utilizing parsed arguments and integrating with the main application. - - **Requirements**: - - Must have an `__init__` method that accepts exactly three parameters besides `self`: - - `args`: Arguments passed to the plugin. - - The parser instance (typically `self.parser` from the `Parser` class). 
- - The Connapp instance to interact with the Connpy app. - 3. **Class `Preload`**: - - **Purpose**: Performs any necessary preliminary setup or configuration independent of the main parsing and entry logic. - - **Requirements**: - - Contains at least an `__init__` method that accepts parameter connapp besides `self`. - -### Class Dependencies and Combinations -- **Dependencies**: - - `Parser` and `Entrypoint` are interdependent and must both be present if one is included. - - `Preload` is independent and may exist alone or alongside the other classes. -- **Valid Combinations**: - - `Parser` and `Entrypoint` together. - - `Preload` alone. - - All three classes (`Parser`, `Entrypoint`, `Preload`). +- The plugin script must define specific classes: + 1. **Class `Parser`**: Handles `argparse.ArgumentParser` initialization. + 2. **Class `Entrypoint`**: Main execution logic (receives `args`, `parser`, and `connapp`). + 3. **Class `Preload`**: (Optional) For modifying core app behavior or registering hooks. ### Preload Modifications and Hooks - -In the `Preload` class of the plugin system, you have the ability to customize the behavior of existing classes and methods within the application through a robust hooking system. This documentation explains how to use the `modify`, `register_pre_hook`, and `register_post_hook` methods to tailor plugin functionality to your needs. - -#### Modifying Classes with `modify` -The `modify` method allows you to alter instances of a class at the time they are created or after their creation. This is particularly useful for setting or modifying configuration settings, altering default behaviors, or adding new functionalities to existing classes without changing the original class definitions. - -- **Usage**: Modify a class to include additional configurations or changes -- **Modify Method Signature**: - - `modify(modification_method)`: A function that is invoked with an instance of the class as its argument. 
This function should perform any modifications directly on this instance. -- **Modification Method Signature**: - - **Arguments**: - - `cls`: This function accepts a single argument, the class instance, which it then modifies. - - **Modifiable Classes**: - - `connapp.config` - - `connapp.node` - - `connapp.nodes` - - `connapp.ai` - - ```python - def modify_config(cls): - # Example modification: adding a new attribute or modifying an existing one - cls.new_attribute = 'New Value' - - class Preload: - def __init__(self, connapp): - # Applying modification to the config class instance - connapp.config.modify(modify_config) - ``` - -#### Implementing Method Hooks -There are 2 methods that allows you to define custom logic to be executed before (`register_pre_hook`) or after (`register_post_hook`) the main logic of a method. This is particularly useful for logging, auditing, preprocessing inputs, postprocessing outputs or adding functionalities. - - - **Usage**: Register hooks to methods to execute additional logic before or after the main method execution. -- **Registration Methods Signature**: - - `register_pre_hook(pre_hook_method)`: A function that is invoked before the main method is executed. This function should do preprocessing of the arguments. - - `register_post_hook(post_hook_method)`: A function that is invoked after the main method is executed. This function should do postprocessing of the outputs. -- **Method Signatures for Pre-Hooks** - - `pre_hook_method(*args, **kwargs)` - - **Arguments**: - - `*args`, `**kwargs`: The arguments and keyword arguments that will be passed to the method being hooked. The pre-hook function has the opportunity to inspect and modify these arguments before they are passed to the main method. - - **Return**: - - Must return a tuple `(args, kwargs)`, which will be used as the new arguments for the main method. If the original arguments are not modified, the function should return them as received. 
-- **Method Signatures for Post-Hooks**: - - `post_hook_method(*args, **kwargs)` - - **Arguments**: - - `*args`, `**kwargs`: The arguments and keyword arguments that were passed to the main method. - - `kwargs["result"]`: The value returned by the main method. This allows the post-hook to inspect and even alter the result before it is returned to the original caller. - - **Return**: - - Can return a modified result, which will replace the original result of the main method, or simply return `kwargs["result"]` to return the original method result. - - ```python - def pre_processing_hook(*args, **kwargs): - print("Pre-processing logic here") - # Modify arguments or perform any checks - return args, kwargs # Return modified or unmodified args and kwargs - - def post_processing_hook(*args, **kwargs): - print("Post-processing logic here") - # Modify the result or perform any final logging or cleanup - return kwargs["result"] # Return the modified or unmodified result - - class Preload: - def __init__(self, connapp): - # Registering a pre-hook - connapp.ai.some_method.register_pre_hook(pre_processing_hook) - - # Registering a post-hook - connapp.node.another_method.register_post_hook(post_processing_hook) - ``` - - -### Executable Block -- The plugin script can include an executable block: - - `if __name__ == "__main__":` - - This block allows the plugin to be run as a standalone script for testing or independent use. +You can customize the behavior of core classes using hooks: +- **`modify(method)`**: Alter class instances (e.g., `connapp.config`, `connapp.ai`). +- **`register_pre_hook(method)`**: Logic to run before a method execution. +- **`register_post_hook(method)`**: Logic to run after a method execution. ### Command Completion Support +Plugins can provide intelligent tab completion: +1. **Tree-based Completion (Recommended)**: Define `_connpy_tree(info)` returning a navigation dictionary. +2. 
**Legacy Completion**: Define `_connpy_completion(wordsnumber, words, info)`. -Plugins can provide intelligent **tab completion** by defining autocompletion logic. There are two supported methods, with the tree-based approach being the most modern and recommended. +--- -#### 1. Tree-based Completion (Recommended) +## βš™οΈ gRPC Service Architecture +Connpy can operate in a decoupled mode: +1. **Start the API (Server)**: `conn api -s 50051` +2. **Configure the Client**: + ```bash + conn config --service-mode remote + conn config --remote-host localhost:50051 + ``` +All inventory management and execution will now happen on the server. -Define a function called `_connpy_tree` that returns a declarative navigation tree. This method is highly efficient, supports complex state loops, and is very simple to implement for most use cases. +--- +## 🐍 Automation Module (API) +You can use `connpy` as a Python library for your own scripts. + +### Basic Execution ```python -def _connpy_tree(info=None): - nodes = info.get("nodes", []) - return { - "__exclude_used__": True, # Filter out words already typed - "__extra__": nodes, # Suggest nodes at this level - "--format": ["json", "yaml", "table"], # Fixed suggestions - "*": { # Wildcard matches any positional word - "interface1": None, - "interface2": None, - "--verbose": None - } - } -``` - -- **Keys**: Literal completions (exact matches). -- **`*` Key**: A wildcard that matches any positional word typed by the user. -- **`__extra__`**: A list or a callable `(words) -> list` that adds dynamic suggestions. -- **`__exclude_used__`**: (Boolean) If True, automatically filters out words already present in the command line. - -#### 2. Legacy Function-based Completion - -For backward compatibility or highly custom logic, you can define `_connpy_completion`. 
- -```python -def _connpy_completion(wordsnumber, words, info=None): - if wordsnumber == 3: - return ["--help", "--verbose", "start", "stop"] - - elif wordsnumber == 4 and words[2] == "start": - return info["nodes"] # Suggest node names - - return [] -``` - -| Parameter | Description | -|----------------|-------------| -| `wordsnumber` | Integer indicating the total number of words on the command line. For plugins, this typically starts at 3. | -| `words` | A list of tokens (words) already typed. `words[0]` is always the name of the plugin. | -| `info` | A dictionary of structured context data (`nodes`, `folders`, `profiles`, `config`). | - -> In this example, if the user types `connpy myplugin start ` and presses Tab, it will suggest node names. - -### Handling Unknown Arguments - -Plugins can choose to accept and process unknown arguments that are **not explicitly defined** in the parser. To enable this behavior, the plugin must define the following hidden argument in its `Parser` class: - -``` -self.parser.add_argument( - "--unknown-args", - action="store_true", - default=True, - help=argparse.SUPPRESS -) -``` - -#### Behavior: - -- When this argument is present, Connpy will parse the known arguments and capture any extra (unknown) ones. -- These unknown arguments will be passed to the plugin as `args.unknown_args` inside the `Entrypoint`. -- If the user does not pass any unknown arguments, `args.unknown_args` will contain the default value (`True`, unless overridden). - -#### Example: - -If a plugin accepts unknown tcpdump flags like this: - -``` -connpy myplugin -nn -s0 -``` - -And defines the hidden `--unknown-args` flag as shown above, then: - -- `args.unknown_args` inside `Entrypoint.__init__()` will be: `['-nn', '-s0']` - -> This allows the plugin to receive and process arguments intended for external tools (e.g., `tcpdump`) without argparse raising an error. 
- -#### Note: - -If a plugin does **not** define `--unknown-args`, any extra arguments passed will cause argparse to fail with an unrecognized arguments error. - -### Script Verification -- The `verify_script` method in `plugins.py` is used to check the plugin script's compliance with these standards. -- Non-compliant scripts will be rejected to ensure consistency and proper functionality within the plugin system. - -### Example Script - -For a practical example of how to write a compatible plugin script, please refer to the following example: - -[Example Plugin Script](https://github.com/fluzzi/awspy) - -This script demonstrates the required structure and implementation details according to the plugin system's standards. - -## Automation module usage -### Standalone module -``` import connpy -router = connpy.node("uniqueName","ip/host", user="username", password="password") -router.run(["term len 0","show run"]) +router = connpy.node("uniqueName", "1.1.1.1", user="admin") +router.run(["show ip int brief"]) print(router.output) -hasip = router.test("show ip int brief","1.1.1.1") -if hasip: - print("Router has ip 1.1.1.1") -else: - print("router does not have ip 1.1.1.1") ``` -### Using manager configuration -``` -import connpy -conf = connpy.configfile() -device = conf.getitem("router@office") -router = connpy.node("unique name", **device, config=conf) -result = router.run("show ip int brief") -print(result) -``` -### Running parallel tasks on multiple devices -``` -import connpy -conf = connpy.configfile() -#You can get the nodes from the config from a folder and fitlering in it -nodes = conf.getitem("@office", ["router1", "router2", "router3"]) -#You can also get each node individually: -nodes = {} -nodes["router1"] = conf.getitem("router1@office") -nodes["router2"] = conf.getitem("router2@office") -nodes["router10"] = conf.getitem("router10@datacenter") -#Also, you can create the nodes manually: -nodes = {} -nodes["router1"] = {"host": "1.1.1.1", "user": "user", 
"password": "password1"} -nodes["router2"] = {"host": "1.1.1.2", "user": "user", "password": "password2"} -nodes["router3"] = {"host": "1.1.1.2", "user": "user", "password": "password3"} -#Finally you run some tasks on the nodes -mynodes = connpy.nodes(nodes, config = conf) -result = mynodes.test(["show ip int br"], "1.1.1.2") -for i in result: - print("---" + i + "---") - print(result[i]) - print() -# Or for one specific node -mynodes.router1.run(["term len 0". "show run"], folder = "/home/user/logs") -``` -### Using variables -``` +### Parallel Tasks with Variables +```python import connpy config = connpy.configfile() -nodes = config.getitem("@office", ["router1", "router2", "router3"]) -commands = [] -commands.append("config t") -commands.append("interface lo {id}") -commands.append("ip add {ip} {mask}") -commands.append("end") -variables = {} -variables["router1@office"] = {"ip": "10.57.57.1"} -variables["router2@office"] = {"ip": "10.57.57.2"} -variables["router3@office"] = {"ip": "10.57.57.3"} -variables["__global__"] = {"id": "57"} -variables["__global__"]["mask"] = "255.255.255.255" -expected = "!" -routers = connpy.nodes(nodes, config = config) -routers.run(commands, variables) -routers.test("ping {ip}", expected, variables) -for key in routers.result: - print(key, ' ---> ', ("pass" if routers.result[key] else "fail")) +nodes = config.getitem("@office", ["router1", "router2"]) +routers = connpy.nodes(nodes, config=config) + +variables = { + "router1@office": {"id": "1"}, + "__global__": {"mask": "255.255.255.0"} +} +routers.run(["interface lo{id}", "ip address 10.0.0.{id} {mask}"], variables) ``` -### Using AI -The AI module uses a multi-agent architecture with an **Engineer** (fast execution) and an **Architect** (strategic reasoning). It supports any LLM provider through [litellm](https://github.com/BerriAI/litellm). 
+ +### AI Programmatic Use ```python import connpy -conf = connpy.configfile() -# Uses models and API keys from config, or override them: -myai = connpy.ai(conf, engineer_model="gemini/gemini-2.5-flash", engineer_api_key="your-key") -result = myai.ask("go to router1 and show me the running configuration") -print(result["response"]) -# Streaming is enabled by default for CLI, disable for programmatic use: -result = myai.ask("show interfaces on all routers", stream=False) -print(result["response"]) +myai = connpy.ai(connpy.configfile()) +response = myai.ask("What is the status of the BGP neighbors in the office?") ``` -#### AI Plugin Tool Registration -Plugins can extend the AI system by registering custom tools via the `Preload` class: -```python -def _register_my_tools(ai_instance): - tool_def = { - "type": "function", - "function": { - "name": "my_custom_tool", - "description": "Does something useful.", - "parameters": { - "type": "object", - "properties": {"query": {"type": "string"}}, - "required": ["query"] - } - } - } - ai_instance.register_ai_tool( - tool_definition=tool_def, - handler=my_handler_function, - target="engineer", # or "architect" or "both" - engineer_prompt="- My tool: does X.", - architect_prompt=" * My tool (my_custom_tool)." - ) - -class Preload: - def __init__(self, connapp): - connapp.ai.modify(_register_my_tools) -``` -## gRPC Service Architecture -Connpy features a completely decoupled gRPC Client/Server architecture. You can run Connpy as a standalone background service and connect to it remotely via the CLI or other clients. - -### 1. Start the Server -Start the gRPC service by running: -```bash -connpy api -s 50051 -``` -The server will handle all configurations, connections, AI sessions, and plugin execution locally on the machine it runs on. - -### 2. 
Connect the Client -Configure your local CLI client to connect to the remote server: -```bash -connpy config --service-mode remote -connpy config --remote-host localhost:50051 -``` -Once configured, all commands (`connpy node`, `connpy list`, `connpy ai`, etc.) will execute transparently on the remote server via thin-client proxies. You can revert back to standalone execution at any time by running `connpy config --service-mode local`. - -### Programmatic Access (gRPC & SOA) -If you wish to build your own application (Web, Desktop, or Scripts) using the Connpy backend, you can use the `ServiceProvider` to interact with either a local or remote service transparently. - -```python -import connpy -from connpy.services.provider import ServiceProvider - -# Initialize local config -config = connpy.configfile() - -# Connect to the remote gRPC service -services = ServiceProvider( - config, - mode="remote", - remote_host="localhost:50051" -) - -# Use any service (the logic is identical to local mode) -nodes = services.nodes.list_nodes() -for name in nodes: - print(f"Found node: {name}") - -# Run a command remotely via streaming -for chunk in services.execution.run_commands(nodes=["server1"], commands=["uptime"]): - print(chunk["output"], end="") -``` - - +--- +*For detailed developer notes and plugin hooks documentation, see the [Documentation](https://fluzzi.github.io/connpy/).* diff --git a/connpy/__init__.py b/connpy/__init__.py index 50c00bc..6b200dd 100644 --- a/connpy/__init__.py +++ b/connpy/__init__.py @@ -1,476 +1,182 @@ #!/usr/bin/env python3 ''' -## Connection manager +

+ App Logo +

-Connpy is a SSH, SFTP, Telnet, kubectl, Docker pod, and AWS SSM connection manager and automation module for Linux, Mac, and Docker. -### Features - - Manage connections using SSH, SFTP, Telnet, kubectl, Docker exec, and AWS SSM. - - Set contexts to manage specific nodes from specific contexts (work/home/clients/etc). - - You can generate profiles and reference them from nodes using @profilename so you don't - need to edit multiple nodes when changing passwords or other information. - - Nodes can be stored on @folder or @subfolder@folder to organize your devices. They can - be referenced using node@subfolder@folder or node@folder. - - If you have too many nodes, get a completion script using: conn config --completion. - Or use fzf by installing pyfzf and running conn config --fzf true. - - Create in bulk, copy, move, export, and import nodes for easy management. - - Run automation scripts on network devices. - - Use AI with a multi-agent system (Engineer/Architect) to help you manage your devices. - Supports any LLM provider via litellm (OpenAI, Anthropic, Google, etc.). - - Add plugins with your own scripts, and execute them remotely. - - Fully decoupled gRPC Client/Server architecture. - - Unified UI with syntax highlighting and theming. - - Much more! +# Connpy +[![](https://img.shields.io/pypi/v/connpy.svg?style=flat-square)](https://pypi.org/pypi/connpy/) +[![](https://img.shields.io/pypi/pyversions/connpy.svg?style=flat-square)](https://pypi.org/pypi/connpy/) +[![](https://img.shields.io/pypi/l/connpy.svg?style=flat-square)](https://github.com/fluzzi/connpy/blob/main/LICENSE) +[![](https://img.shields.io/pypi/dm/connpy.svg?style=flat-square)](https://pypi.org/pypi/connpy/) -### Usage +**Connpy** is a powerful Connection Manager and Network Automation Platform for Linux, Mac, and Docker. It provides a unified interface for **SSH, SFTP, Telnet, kubectl, Docker pods, and AWS SSM**. 
+ +The v6 release introduces the **AI Copilot**, an interactive terminal assistant that understands your network context and helps you manage your infrastructure more intelligently. + + +## πŸ€– AI Copilot (New in v6) +The AI Copilot is deeply integrated into your terminal workflow: +- **Terminal Context Awareness**: The Copilot can "see" your screen output, helping you diagnose errors or analyze command results in real-time. +- **Hybrid Multi-Agent System**: Automatically escalates complex tasks between the **Network Engineer** (execution) and the **Network Architect** (strategy). +- **MCP Integration**: Dynamically load tools from external providers (6WIND, AWS, etc.) via the Model Context Protocol. +- **Interactive Chat**: Launch with `conn ai` for a collaborative troubleshooting session. + + +## Core Features +- **Multi-Protocol**: Native support for SSH, SFTP, Telnet, kubectl, Docker exec, and AWS SSM. +- **Context Management**: Set regex-based contexts to manage specific nodes across different environments (work, home, clients). +- **Advanced Inventory**: + - Organize nodes in folders (`@folder`) and subfolders (`@subfolder@folder`). + - Use Global Profiles (`@profilename`) to manage shared credentials easily. + - Bulk creation, copying, moving, and export/import of nodes. +- **Modern UI**: High-performance terminal experience with `prompt-toolkit`, including: + - Fuzzy search integration with `fzf`. + - Advanced tab completion. + - Syntax highlighting and customizable themes. +- **Automation Engine**: Run parallel tasks and playbooks on multiple devices with variable support. +- **Plugin System**: Build and execute custom Python scripts locally or on a remote gRPC server. +- **gRPC Architecture**: Fully decoupled Client/Server model for distributed management. +- **Privacy & Sync**: Local-first encrypted storage (RSA/OAEP) with optional Google Drive backup. 
+ + +## Installation + +```bash +pip install connpy ``` + +### Run it in Windows/Linux using Docker +```bash +git clone https://github.com/fluzzi/connpy +cd connpy +docker compose build + +# Run it like a native app (completely silent) +docker compose --log-level ERROR run --rm --remove-orphans connpy-app [command] + +# Pro Tip: Add this alias for a 100% native experience from any folder +alias conn='docker compose -f /path/to/connpy/docker-compose.yml --log-level ERROR run --rm --remove-orphans connpy-app' +``` + +--- + +## πŸ”’ Privacy & Integration + +### Privacy Policy +Connpy is committed to protecting your privacy: +- **Local Storage**: All server addresses, usernames, and passwords are encrypted and stored **only** on your machine. No data is transmitted to our servers. +- **Data Access**: Data is used solely for managing and automating your connections. + +### Google Integration +Used strictly for backup: +- **Backup**: Sync your encrypted configuration with your Google Drive account. +- **Scoped Access**: Connpy only accesses its own backup files. + +--- + +## Usage + +```bash usage: conn [-h] [--add | --del | --mod | --show | --debug] [node|folder] [--sftp] - conn {profile,move,mv,copy,cp,list,ls,bulk,export,import,ai,run,api,plugin,config,sync,context} ... 
- -positional arguments: - node|folder node[@subfolder][@folder] - Connect to specific node or show all matching nodes - [@subfolder][@folder] - Show all available connections globally or in specified path - -options: - -h, --help show this help message and exit - -v, --version Show version - -a, --add Add new node[@subfolder][@folder] or [@subfolder]@folder - -r, --del, --rm Delete node[@subfolder][@folder] or [@subfolder]@folder - -e, --mod, --edit Modify node[@subfolder][@folder] - -s, --show Show node[@subfolder][@folder] - -d, --debug Display all conections steps - -t, --sftp Connects using sftp instead of ssh - --service-mode Set the backend service mode (local or remote) - --remote Connect to a remote connpy service via gRPC - --theme UI Output theme (dark, light, or path) - -Commands: - profile Manage profiles - move(mv) Move node - copy(cp) Copy node - list(ls) List profiles, nodes or folders - bulk Add nodes in bulk - export Export connection folder to Yaml file - import Import connection folder to config from Yaml file - ai Make request to an AI - run Run scripts or commands on nodes - api Start and stop connpy api - plugin Manage plugins - config Manage app config - sync Sync config with Google - context Manage contexts with regex matching + conn {profile,move,copy,list,bulk,export,import,ai,run,api,plugin,config,sync,context} ... 
``` -### Manage profiles -``` -usage: conn profile [-h] (--add | --del | --mod | --show) profile +### Basic Examples: +```bash +# Add a folder and subfolder +conn --add @office +conn --add @datacenter@office -positional arguments: - profile Name of profile to manage +# Add a node with a profile +conn --add server1@datacenter@office --profile @myuser -options: - -h, --help show this help message and exit - -a, --add Add new profile - -r, --del, --rm Delete profile - -e, --mod, --edit Modify profile - -s, --show Show profile +# Connect to a node (fuzzy match) +conn server1 +# Start the AI Copilot +conn ai + +# Run a command on all nodes in a folder +conn run @office "uptime" ``` -### Examples -``` - #Add new profile - conn profile --add office-user - #Add new folder - conn --add @office - #Add new subfolder - conn --add @datacenter@office - #Add node to subfolder - conn --add server@datacenter@office - #Add node to folder - conn --add pc@office - #Show node information - conn --show server@datacenter@office - #Connect to nodes - conn pc@office - conn server - #Create and set new context - conn context -a office .*@office - conn context --set office - #Run a command in a node - conn run server ls -la -``` +--- + ## Plugin Requirements for Connpy ### Remote Plugin Execution When Connpy operates in remote mode, plugins are executed **transparently on the server**: - The client automatically downloads the plugin source code (`Parser` class context) to generate the local `argparse` structure and provide autocompletion. -- The execution phase (`Entrypoint` class) is redirected via gRPC streams to execute in the server's memory, ensuring the plugin runs securely against the server's inventory without passing sensitive data to the client. -- You can manage remote plugins using the `--remote` flag (e.g. `connpy plugin --add myplugin script.py --remote`). +- The execution phase (`Entrypoint` class) is redirected via gRPC streams to execute in the server's memory. 
+- You can manage remote plugins using the `--remote` flag. ### General Structure -- The plugin script must be a Python file. -- Only the following top-level elements are allowed in the plugin script: - - Class definitions - - Function definitions - - Import statements - - The `if __name__ == "__main__":` block for standalone execution - - Pass statements - -### Specific Class Requirements -- The plugin script must define specific classes with particular attributes and methods. Each class serves a distinct role within the plugin's architecture: - 1. **Class `Parser`**: - - **Purpose**: Handles parsing of command-line arguments. - - **Requirements**: - - Must contain only one method: `__init__`. - - The `__init__` method must initialize at least one attribute: - - `self.parser`: An instance of `argparse.ArgumentParser`. - 2. **Class `Entrypoint`**: - - **Purpose**: Acts as the entry point for plugin execution, utilizing parsed arguments and integrating with the main application. - - **Requirements**: - - Must have an `__init__` method that accepts exactly three parameters besides `self`: - - `args`: Arguments passed to the plugin. - - The parser instance (typically `self.parser` from the `Parser` class). - - The Connapp instance to interact with the Connpy app. - 3. **Class `Preload`**: - - **Purpose**: Performs any necessary preliminary setup or configuration independent of the main parsing and entry logic. - - **Requirements**: - - Contains at least an `__init__` method that accepts parameter connapp besides `self`. - -### Class Dependencies and Combinations -- **Dependencies**: - - `Parser` and `Entrypoint` are interdependent and must both be present if one is included. - - `Preload` is independent and may exist alone or alongside the other classes. -- **Valid Combinations**: - - `Parser` and `Entrypoint` together. - - `Preload` alone. - - All three classes (`Parser`, `Entrypoint`, `Preload`). +- The plugin script must define specific classes: + 1. 
**Class `Parser`**: Handles `argparse.ArgumentParser` initialization. + 2. **Class `Entrypoint`**: Main execution logic (receives `args`, `parser`, and `connapp`). + 3. **Class `Preload`**: (Optional) For modifying core app behavior or registering hooks. ### Preload Modifications and Hooks - -In the `Preload` class of the plugin system, you have the ability to customize the behavior of existing classes and methods within the application through a robust hooking system. This documentation explains how to use the `modify`, `register_pre_hook`, and `register_post_hook` methods to tailor plugin functionality to your needs. - -#### Modifying Classes with `modify` -The `modify` method allows you to alter instances of a class at the time they are created or after their creation. This is particularly useful for setting or modifying configuration settings, altering default behaviors, or adding new functionalities to existing classes without changing the original class definitions. - -- **Usage**: Modify a class to include additional configurations or changes -- **Modify Method Signature**: - - `modify(modification_method)`: A function that is invoked with an instance of the class as its argument. This function should perform any modifications directly on this instance. -- **Modification Method Signature**: - - **Arguments**: - - `cls`: This function accepts a single argument, the class instance, which it then modifies. 
- - **Modifiable Classes**: - - `connapp.config` - - `connapp.node` - - `connapp.nodes` - - `connapp.ai` - - ```python - def modify_config(cls): - # Example modification: adding a new attribute or modifying an existing one - cls.new_attribute = 'New Value' - - class Preload: - def __init__(self, connapp): - # Applying modification to the config class instance - connapp.config.modify(modify_config) - ``` - -#### Implementing Method Hooks -There are 2 methods that allows you to define custom logic to be executed before (`register_pre_hook`) or after (`register_post_hook`) the main logic of a method. This is particularly useful for logging, auditing, preprocessing inputs, postprocessing outputs or adding functionalities. - - - **Usage**: Register hooks to methods to execute additional logic before or after the main method execution. -- **Registration Methods Signature**: - - `register_pre_hook(pre_hook_method)`: A function that is invoked before the main method is executed. This function should do preprocessing of the arguments. - - `register_post_hook(post_hook_method)`: A function that is invoked after the main method is executed. This function should do postprocessing of the outputs. -- **Method Signatures for Pre-Hooks** - - `pre_hook_method(*args, **kwargs)` - - **Arguments**: - - `*args`, `**kwargs`: The arguments and keyword arguments that will be passed to the method being hooked. The pre-hook function has the opportunity to inspect and modify these arguments before they are passed to the main method. - - **Return**: - - Must return a tuple `(args, kwargs)`, which will be used as the new arguments for the main method. If the original arguments are not modified, the function should return them as received. -- **Method Signatures for Post-Hooks**: - - `post_hook_method(*args, **kwargs)` - - **Arguments**: - - `*args`, `**kwargs`: The arguments and keyword arguments that were passed to the main method. 
- - `kwargs["result"]`: The value returned by the main method. This allows the post-hook to inspect and even alter the result before it is returned to the original caller. - - **Return**: - - Can return a modified result, which will replace the original result of the main method, or simply return `kwargs["result"]` to return the original method result. - - ```python - def pre_processing_hook(*args, **kwargs): - print("Pre-processing logic here") - # Modify arguments or perform any checks - return args, kwargs # Return modified or unmodified args and kwargs - - def post_processing_hook(*args, **kwargs): - print("Post-processing logic here") - # Modify the result or perform any final logging or cleanup - return kwargs["result"] # Return the modified or unmodified result - - class Preload: - def __init__(self, connapp): - # Registering a pre-hook - connapp.ai.some_method.register_pre_hook(pre_processing_hook) - - # Registering a post-hook - connapp.node.another_method.register_post_hook(post_processing_hook) - ``` - -### Executable Block -- The plugin script can include an executable block: - - `if __name__ == "__main__":` - - This block allows the plugin to be run as a standalone script for testing or independent use. +You can customize the behavior of core classes using hooks: +- **`modify(method)`**: Alter class instances (e.g., `connapp.config`, `connapp.ai`). +- **`register_pre_hook(method)`**: Logic to run before a method execution. +- **`register_post_hook(method)`**: Logic to run after a method execution. ### Command Completion Support +Plugins can provide intelligent tab completion: +1. **Tree-based Completion (Recommended)**: Define `_connpy_tree(info)` returning a navigation dictionary. +2. **Legacy Completion**: Define `_connpy_completion(wordsnumber, words, info)`. -Plugins can provide intelligent **tab completion** by defining autocompletion logic. There are two supported methods, with the tree-based approach being the most modern and recommended. 
+--- -#### 1. Tree-based Completion (Recommended) +## βš™οΈ gRPC Service Architecture +Connpy can operate in a decoupled mode: +1. **Start the API (Server)**: `conn api -s 50051` +2. **Configure the Client**: + ```bash + conn config --service-mode remote + conn config --remote-host localhost:50051 + ``` +All inventory management and execution will now happen on the server. -Define a function called `_connpy_tree` that returns a declarative navigation tree. This method is highly efficient, supports complex state loops, and is very simple to implement for most use cases. +--- +## 🐍 Automation Module (API) +You can use `connpy` as a Python library for your own scripts. + +### Basic Execution ```python -def _connpy_tree(info=None): - nodes = info.get("nodes", []) - return { - "__exclude_used__": True, # Filter out words already typed - "__extra__": nodes, # Suggest nodes at this level - "--format": ["json", "yaml", "table"], # Fixed suggestions - "*": { # Wildcard matches any positional word - "interface1": None, - "interface2": None, - "--verbose": None - } - } -``` - -- **Keys**: Literal completions (exact matches). -- **`*` Key**: A wildcard that matches any positional word typed by the user. -- **`__extra__`**: A list or a callable `(words) -> list` that adds dynamic suggestions. -- **`__exclude_used__`**: (Boolean) If True, automatically filters out words already present in the command line. - -#### 2. Legacy Function-based Completion - -For backward compatibility or highly custom logic, you can define `_connpy_completion`. - -```python -def _connpy_completion(wordsnumber, words, info=None): - if wordsnumber == 3: - return ["--help", "--verbose", "start", "stop"] - - elif wordsnumber == 4 and words[2] == "start": - return info["nodes"] # Suggest node names - - return [] -``` - -| Parameter | Description | -|----------------|-------------| -| `wordsnumber` | Integer indicating the total number of words on the command line. For plugins, this typically starts at 3. 
| -| `words` | A list of tokens (words) already typed. `words[0]` is always the name of the plugin. | -| `info` | A dictionary of structured context data (`nodes`, `folders`, `profiles`, `config`). | - -> In this example, if the user types `connpy myplugin start ` and presses Tab, it will suggest node names. - -### Handling Unknown Arguments - -Plugins can choose to accept and process unknown arguments that are **not explicitly defined** in the parser. To enable this behavior, the plugin must define the following hidden argument in its `Parser` class: - -``` -self.parser.add_argument( - "--unknown-args", - action="store_true", - default=True, - help=argparse.SUPPRESS -) -``` - -#### Behavior: - -- When this argument is present, Connpy will parse the known arguments and capture any extra (unknown) ones. -- These unknown arguments will be passed to the plugin as `args.unknown_args` inside the `Entrypoint`. -- If the user does not pass any unknown arguments, `args.unknown_args` will contain the default value (`True`, unless overridden). - -#### Example: - -If a plugin accepts unknown tcpdump flags like this: - -``` -connpy myplugin -nn -s0 -``` - -And defines the hidden `--unknown-args` flag as shown above, then: - -- `args.unknown_args` inside `Entrypoint.__init__()` will be: `['-nn', '-s0']` - -> This allows the plugin to receive and process arguments intended for external tools (e.g., `tcpdump`) without argparse raising an error. - -#### Note: - -If a plugin does **not** define `--unknown-args`, any extra arguments passed will cause argparse to fail with an unrecognized arguments error. - -### Script Verification -- The `verify_script` method in `plugins.py` is used to check the plugin script's compliance with these standards. -- Non-compliant scripts will be rejected to ensure consistency and proper functionality within the plugin system. 
-- -### Example Script - -For a practical example of how to write a compatible plugin script, please refer to the following example: - -[Example Plugin Script](https://github.com/fluzzi/awspy) - -This script demonstrates the required structure and implementation details according to the plugin system's standards. - -## gRPC Service Architecture -Connpy features a completely decoupled gRPC Client/Server architecture. You can run Connpy as a standalone background service and connect to it remotely via the CLI or other clients. - -### 1. Start the Server -Start the gRPC service by running: -```bash -connpy api -s 50051 -``` -The server will handle all configurations, connections, AI sessions, and plugin execution locally on the machine it runs on. - -### 2. Connect the Client -Configure your local CLI client to connect to the remote server: -```bash -connpy config --service-mode remote -connpy config --remote-host localhost:50051 -``` -Once configured, all commands (`connpy node`, `connpy list`, `connpy ai`, etc.) will execute transparently on the remote server via thin-client proxies. You can revert back to standalone execution at any time by running `connpy config --service-mode local`. 
- -### Programmatic Access (gRPC & SOA) -Developers can build their own applications using the Connpy backend by utilizing the `ServiceProvider`: - -```python -from connpy.services.provider import ServiceProvider -services = ServiceProvider(config, mode="remote", remote_host="localhost:50051") -nodes = services.nodes.list_nodes() -``` - - -## Automation module -The automation module -### Standalone module -``` import connpy -router = connpy.node("uniqueName","ip/host", user="user", password="pass") -router.run(["term len 0","show run"]) +router = connpy.node("uniqueName", "1.1.1.1", user="admin") +router.run(["show ip int brief"]) print(router.output) -hasip = router.test("show ip int brief","1.1.1.1") -if hasip: - print("Router has ip 1.1.1.1") -else: - print("router does not have ip 1.1.1.1") ``` -### Using manager configuration -``` -import connpy -conf = connpy.configfile() -device = conf.getitem("server@office") -server = connpy.node("unique name", **device, config=conf) -result = server.run(["cd /", "ls -la"]) -print(result) -``` -### Running parallel tasks -``` -import connpy -conf = connpy.configfile() -#You can get the nodes from the config from a folder and fitlering in it -nodes = conf.getitem("@office", ["router1", "router2", "router3"]) -#You can also get each node individually: -nodes = {} -nodes["router1"] = conf.getitem("router1@office") -nodes["router2"] = conf.getitem("router2@office") -nodes["router10"] = conf.getitem("router10@datacenter") -#Also, you can create the nodes manually: -nodes = {} -nodes["router1"] = {"host": "1.1.1.1", "user": "user", "password": "pass1"} -nodes["router2"] = {"host": "1.1.1.2", "user": "user", "password": "pass2"} -nodes["router3"] = {"host": "1.1.1.2", "user": "user", "password": "pass3"} -#Finally you run some tasks on the nodes -mynodes = connpy.nodes(nodes, config = conf) -result = mynodes.test(["show ip int br"], "1.1.1.2") -for i in result: - print("---" + i + "---") - print(result[i]) - print() -# Or for one 
specific node -mynodes.router1.run(["term len 0". "show run"], folder = "/home/user/logs") -``` -### Using variables -``` +### Parallel Tasks with Variables +```python import connpy config = connpy.configfile() -nodes = config.getitem("@office", ["router1", "router2", "router3"]) -commands = [] -commands.append("config t") -commands.append("interface lo {id}") -commands.append("ip add {ip} {mask}") -commands.append("end") -variables = {} -variables["router1@office"] = {"ip": "10.57.57.1"} -variables["router2@office"] = {"ip": "10.57.57.2"} -variables["router3@office"] = {"ip": "10.57.57.3"} -variables["__global__"] = {"id": "57"} -variables["__global__"]["mask"] = "255.255.255.255" -expected = "!" -routers = connpy.nodes(nodes, config = config) -routers.run(commands, variables) -routers.test("ping {ip}", expected, variables) -for key in routers.result: - print(key, ' ---> ', ("pass" if routers.result[key] else "fail")) -``` -### Using AI +nodes = config.getitem("@office", ["router1", "router2"]) +routers = connpy.nodes(nodes, config=config) + +variables = { + "router1@office": {"id": "1"}, + "__global__": {"mask": "255.255.255.0"} +} +routers.run(["interface lo{id}", "ip address 10.0.0.{id} {mask}"], variables) ``` + +### AI Programmatic Use +```python import connpy -conf = connpy.configfile() -# Uses models and API keys from config, or override them: -myai = connpy.ai(conf, engineer_model="gemini/gemini-2.5-flash", engineer_api_key="your-key") -result = myai.ask("go to router1 and show me the running configuration") -print(result["response"]) -# Streaming is enabled by default for CLI, disable for programmatic use: -result = myai.ask("show interfaces on all routers", stream=False) -print(result["response"]) +myai = connpy.ai(connpy.configfile()) +response = myai.ask("What is the status of the BGP neighbors in the office?") ``` -#### AI Plugin Tool Registration -Plugins can register custom tools with the AI system using `register_ai_tool()` in their `Preload` 
class: -``` -def _register_my_tools(ai_instance): - tool_def = { - "type": "function", - "function": { - "name": "my_custom_tool", - "description": "Does something useful.", - "parameters": { - "type": "object", - "properties": {"query": {"type": "string"}}, - "required": ["query"] - } - } - } - ai_instance.register_ai_tool( - tool_definition=tool_def, - handler=my_handler_function, - target="engineer", # or "architect" or "both" - engineer_prompt="- My tool: does X.", - architect_prompt=" * My tool (my_custom_tool)." - ) - -class Preload: - def __init__(self, connapp): - connapp.ai.modify(_register_my_tools) -``` - -## Developer Notes (SOA Architecture) -As of version 2.0, Connpy has migrated to a **Service-Oriented Architecture (SOA)**: -- **`connpy/cli/`**: Contains all CLI handlers. These are responsible for argument parsing, user interaction (via `inquirer`), and visual output (via `printer`). -- **`connpy/services/`**: Contains pure logic services (Node, Profile, Execution, etc.). -- **Zero-Print Policy**: Services must never use `print()`. All output must be returned as data structures or generators to the caller (CLI handlers). -- **ServiceProvider**: Access services via `connapp.services`. This allows transparent switching between local and remote (gRPC) backends without modifying CLI logic. 
+--- +*For detailed developer notes and plugin hooks documentation, see the [Documentation](https://fluzzi.github.io/connpy/).* ''' from .core import node,nodes from .configfile import configfile diff --git a/connpy/_version.py b/connpy/_version.py index 2718f39..a33395c 100644 --- a/connpy/_version.py +++ b/connpy/_version.py @@ -1 +1 @@ -__version__ = "6.0.0b7" +__version__ = "6.0.0b8" diff --git a/connpy/ai.py b/connpy/ai.py index 09d100d..e987b21 100755 --- a/connpy/ai.py +++ b/connpy/ai.py @@ -118,7 +118,7 @@ class ai: aiconfig = self.config.config.get("ai", {}) # Modelos (Prioridad: Argumento -> Config -> Default) - self.engineer_model = engineer_model or aiconfig.get("engineer_model") or "gemini/gemini-3.1-flash-lite-preview" + self.engineer_model = engineer_model or aiconfig.get("engineer_model") or "gemini/gemini-3.1-flash-lite" self.architect_model = architect_model or aiconfig.get("architect_model") or "anthropic/claude-sonnet-4-6" # API Keys (Prioridad: Argumento -> Config) @@ -1303,6 +1303,8 @@ class ai: node_info = node_info or {} os_info = node_info.get("os", "unknown") node_name = node_info.get("name", "unknown") + persona = node_info.get("persona", "engineer") + memories = node_info.get("memories", []) vendor_reference = "" if os_info and os_info != "unknown": @@ -1315,7 +1317,31 @@ class ai: except Exception: pass - system_prompt = f"""Role: TERMINAL COPILOT. You assist a network engineer during a live SSH session. + if persona == "architect": + system_prompt = f"""Role: NETWORK ARCHITECT. You act as a senior strategic advisor during a live SSH session. +Rules: +1. Answer the user's question directly based on the Terminal Context. +2. Focus on the "why" and "how". Analyze topologies, design patterns, and validate configurations. +3. Do NOT provide commands to execute unless specifically requested. Instead, explain the consequences and best practices. +4. Keep your guide concise and authoritative. +5. 
You MUST output your response in the following strict format: + +Your brief tactical guide in markdown. + + + + +low + +6. Risk level is usually "low" for read-only/no commands. + +Terminal Context: +{terminal_buffer} + +Device OS: {os_info} +Node: {node_name}""" + else: + system_prompt = f"""Role: TERMINAL COPILOT. You assist a network engineer during a live SSH session. Rules: 1. Answer the user's question directly based on the Terminal Context. 2. If the user asks you to analyze, parse, or extract data from the Terminal Context, DO IT directly in the section (you can use markdown tables or lists). Do NOT just give them a command to do it themselves. @@ -1343,6 +1369,11 @@ Node: {node_name}""" if vendor_reference: system_prompt += f"\n\nVendor Command Reference:\n{vendor_reference}" + if memories: + system_prompt += "\n\nSession Memory (Important Facts):\n" + for m in memories: + system_prompt += f"- {m}\n" + # Fetch MCP tools for the current OS mcp_tools = [] try: @@ -1362,14 +1393,18 @@ Node: {node_name}""" iteration = 0 max_iterations = 5 # Allow up to 5 iterations for tool usage + # Use models based on persona + current_model = self.architect_model if persona == "architect" else self.engineer_model + current_key = self.architect_key if persona == "architect" else self.engineer_key + try: while iteration < max_iterations: iteration += 1 response = await acompletion( - model=self.engineer_model, + model=current_model, messages=messages, tools=mcp_tools if mcp_tools else None, - api_key=self.engineer_key, + api_key=current_key, stream=True ) diff --git a/connpy/cli/run_handler.py b/connpy/cli/run_handler.py index 1fd7b1f..5d92489 100644 --- a/connpy/cli/run_handler.py +++ b/connpy/cli/run_handler.py @@ -121,7 +121,7 @@ class RunHandler: commands=commands, variables=variables, parallel=options.get("parallel", 10), - timeout=options.get("timeout", 10), + timeout=options.get("timeout", 20), folder=folder, prompt=prompt, on_node_complete=_on_run_complete @@ -155,7 
+155,7 @@ class RunHandler: expected=expected, variables=variables, parallel=options.get("parallel", 10), - timeout=options.get("timeout", 10), + timeout=options.get("timeout", 20), folder=folder, prompt=prompt, on_node_complete=_on_test_complete diff --git a/connpy/cli/terminal_ui.py b/connpy/cli/terminal_ui.py index 3b66061..e80967c 100644 --- a/connpy/cli/terminal_ui.py +++ b/connpy/cli/terminal_ui.py @@ -1,6 +1,7 @@ import os import re import sys +import time import asyncio import fcntl import termios @@ -22,19 +23,37 @@ from connpy.utils import log_cleaner from ..services.ai_service import AIService class CopilotInterface: - def __init__(self, config, history=None, pt_input=None, pt_output=None, rich_file=None): + def __init__(self, config, history=None, pt_input=None, pt_output=None, rich_file=None, session_state=None): self.config = config self.history = history or InMemoryHistory() self.pt_input = pt_input self.pt_output = pt_output self.ai_service = AIService(config) - + self.session_state = session_state if session_state is not None else { + 'persona': 'engineer', + 'trust_mode': False, + 'memories': [], + 'os': None, + 'prompt': None + } + if rich_file: self.console = Console(theme=connpy_theme, force_terminal=True, file=rich_file) else: self.console = Console(theme=connpy_theme) - - self.mode_range, self.mode_single, self.mode_lines = 0, 1, 2 + + self.mode_range, self.mode_single, self.mode_lines = 0, 1, 2 + + def _get_theme_color(self, style_name: str, fallback: str = "white") -> str: + """Extract Hex or ANSI color name from the active rich theme.""" + try: + style = connpy_theme.styles.get(style_name) + if style and style.color: + # If it's a standard color like 'green', Rich might return its hex triplet + if style.color.is_default: return fallback + return style.color.triplet.hex if style.color.triplet else style.color.name + except: pass + return fallback async def run_session(self, raw_bytes: bytes, @@ -60,7 +79,9 @@ class CopilotInterface: 
'total_lines': len(buffer.split('\n')), 'context_lines': min(50, len(buffer.split('\n'))), 'context_mode': self.mode_range, - 'cancelled': False + 'cancelled': False, + 'toolbar_msg': '', + 'msg_expiry': 0 } # 1. Visual Separation @@ -90,8 +111,13 @@ class CopilotInterface: event.app.invalidate() @bindings.add('tab') def _(event): - state['context_mode'] = (state['context_mode'] + 1) % 3 - event.app.invalidate() + buf = event.current_buffer + # If typing a slash command (no spaces yet), use tab to autocomplete inline + if buf.text.startswith('/') and ' ' not in buf.text: + buf.complete_next() + else: + state['context_mode'] = (state['context_mode'] + 1) % 3 + event.app.invalidate() @bindings.add('escape', eager=True) @bindings.add('c-c') def _(event): @@ -111,154 +137,302 @@ class CopilotInterface: return preview + "\n" + log_cleaner(active_raw.decode(errors='replace')) def get_prompt_text(): + import html + # Always use user_prompt color for the Ask prompt + color = self._get_theme_color("user_prompt", "cyan") + if state['context_mode'] == self.mode_lines: - return HTML(f"Ask [Ctx: {state['context_lines']}/{state['total_lines']}L]: ") + text = html.escape(f"Ask [Ctx: {state['context_lines']}/{state['total_lines']}L]: ") + return HTML(f'') active = get_active_buffer() lines_count = len(active.split('\n')) mode_str = {self.mode_range: "Range", self.mode_single: "Cmd"}[state['context_mode']] - return HTML(f"Ask [{mode_str} {state['context_cmd']} ~{lines_count}L]: ") + text = html.escape(f"Ask [{mode_str} {state['context_cmd']} ~{lines_count}L]: ") + return HTML(f'') + + from prompt_toolkit.application.current import get_app def get_toolbar(): + import html + app = get_app() + c_warning = self._get_theme_color("warning", "yellow") + + if app and app.current_buffer: + text = app.current_buffer.text + # Solo mostrar ayuda de comandos si estamos escribiendo el primer comando y no hay espacios + if text.startswith('/') and ' ' not in text: + commands = ['/os', '/prompt', 
'/architect', '/engineer', '/trust', '/untrust', '/memorize', '/clear'] + matches = [c for c in commands if c.startswith(text.lower())] + if matches: + m_text = html.escape(f"Available: {' '.join(matches)}") + return HTML(f'' + " " * 20) + m_label = {self.mode_range: "RANGE", self.mode_single: "SINGLE", self.mode_lines: "LINES"}[state['context_mode']] if state['context_mode'] == self.mode_lines: - return HTML(f"\u25b6 Ctrl+\u2191/\u2193 adjusts by 50 lines [Tab: {m_label}]") - idx = max(0, state['total_cmds'] - state['context_cmd']) - return HTML(f"\u25b6 {blocks[idx][1]} [Tab: {m_label}]") + base_str = f'\u25b6 Ctrl+\u2191/\u2193 adjusts by 50 lines [Tab: {m_label}]' + else: + idx = max(0, state['total_cmds'] - state['context_cmd']) + desc = blocks[idx][1] + base_str = f'\u25b6 {desc} [Tab: {m_label}]' + + # Wrap base_str in a style to maintain consistency and avoid glitches + # The fg color will be inherited from bottom-toolbar global style if not specified here + base_html = f'{html.escape(base_str)}' + + res_html = base_html + if state.get('toolbar_msg'): + if time.time() < state.get('msg_expiry', 0): + msg = html.escape(state['toolbar_msg']) + res_html = f' | ' + base_html + else: + state['toolbar_msg'] = '' + + # Pad with spaces to ensure the line is cleared when the message disappears + return HTML(res_html + " " * 20) - # 2. Ask question - session = PromptSession(history=self.history) - try: - # Usamos un try/finally interno para asegurar que si algo falla en prompt_async, - # no nos quedemos con la terminal en un estado extraΓ±o. 
- question = await session.prompt_async( - get_prompt_text, - key_bindings=bindings, - bottom_toolbar=get_toolbar + from prompt_toolkit.completion import Completer, Completion + class SlashCommandCompleter(Completer): + def get_completions(self, document, complete_event): + text = document.text_before_cursor + if text.startswith('/'): + parts = text.split() + # Only autocomplete the first word + if len(parts) <= 1 or (len(parts) == 1 and not text.endswith(' ')): + cmd_part = parts[0] if parts else text + commands = [ + ('/os', 'Set device OS (e.g. cisco_ios)'), + ('/prompt', 'Override prompt regex'), + ('/architect', 'Switch to Architect persona'), + ('/engineer', 'Switch to Engineer persona'), + ('/trust', 'Enable auto-execute'), + ('/untrust', 'Disable auto-execute'), + ('/memorize', 'Add fact to memory'), + ('/clear', 'Clear memory') + ] + for cmd, desc in commands: + if cmd.startswith(cmd_part.lower()): + yield Completion(cmd, start_position=-len(cmd_part), display_meta=desc) + + copilot_completer = SlashCommandCompleter() + + while True: + # 2. Ask question + from prompt_toolkit.styles import Style + c_contrast = self._get_theme_color("contrast", "gray") + ui_style = Style.from_dict({ + 'bottom-toolbar': f'fg:{c_contrast}', + }) + + session = PromptSession( + history=self.history, + input=self.pt_input, + output=self.pt_output, + completer=copilot_completer, + reserve_space_for_menu=0, + style=ui_style ) - except (KeyboardInterrupt, EOFError): - state['cancelled'] = True - question = "" - - if state['cancelled'] or not question.strip() or question.strip().lower() == 'cancel': - return "cancel", None, None - - # Enrich question - past = self.history.get_strings() - if len(past) > 1: - history_text = "\n".join(f"- {q}" for q in past[-6:-1]) - question = f"Previous questions:\n{history_text}\n\nCurrent Question:\n{question}" - - # 3. AI Execution - active_buffer = get_active_buffer() - live_text = "Thinking..." 
- panel = Panel(live_text, title="[bold cyan]Copilot Guide[/bold cyan]", border_style="cyan") - - def on_chunk(text): - nonlocal live_text - if live_text == "Thinking...": live_text = "" - live_text += text - - with Live(panel, console=self.console, refresh_per_second=10) as live: - def update_live(t): - live.update(Panel(Markdown(t), title="[bold cyan]Copilot Guide[/bold cyan]", border_style="cyan")) - - wrapped_chunk = lambda t: (on_chunk(t), update_live(live_text)) - - # Check for interruption during AI call - ai_task = asyncio.create_task(on_ai_call(active_buffer, question, wrapped_chunk)) - try: - while not ai_task.done(): - await asyncio.sleep(0.05) - result = await ai_task - except asyncio.CancelledError: + # Usamos un try/finally interno para asegurar que si algo falla en prompt_async, + # no nos quedemos con la terminal en un estado extraΓ±o. + question = await session.prompt_async( + get_prompt_text, + key_bindings=bindings, + bottom_toolbar=get_toolbar + ) + except (KeyboardInterrupt, EOFError): + state['cancelled'] = True + question = "" + + if state['cancelled'] or not question.strip() or question.strip().lower() in ['cancel', 'exit', 'quit']: return "cancel", None, None - if not result or result.get("error"): - if result and result.get("error"): self.console.print(f"[red]Error: {result['error']}[/red]") - return "cancel", None, None - - # 4. Handle result - if live_text == "Thinking..." and result.get("guide"): - self.console.print(Panel(Markdown(result["guide"]), title="[bold cyan]Copilot Guide[/bold cyan]", border_style="cyan")) - - commands = result.get("commands", []) - if not commands: - return "cancel", None, None - - risk = result.get("risk_level", "low") - style = {"low": "green", "high": "yellow", "destructive": "red"}.get(risk, "green") - cmd_text = "\n".join(f" {i+1}. 
{c}" for i, c in enumerate(commands)) - self.console.print(Panel(cmd_text, title=f"[bold {style}]Suggested Commands [{risk.upper()}][/bold {style}]", border_style=style)) - - confirm_session = PromptSession() - c_bindings = KeyBindings() - @c_bindings.add('escape', eager=True) - @c_bindings.add('c-c') - def _(ev): ev.app.exit(result='n') - - try: - action = await confirm_session.prompt_async(HTML(f"Send? (y/n/e/range) [n]: "), key_bindings=c_bindings) - except (KeyboardInterrupt, EOFError): - action = "n" - - def parse_indices(text, max_len): - """Helper to parse '1-3, 5, 7' into [0, 1, 2, 4, 6].""" - indices = [] - # Replace commas with spaces and split - parts = text.replace(',', ' ').split() - for part in parts: - if '-' in part: - try: - start, end = map(int, part.split('-')) - # Ensure inclusive and 0-indexed - indices.extend(range(start-1, end)) - except: continue - elif part.isdigit(): - indices.append(int(part)-1) - # Filter valid indices and remove duplicates - return [i for i in sorted(set(indices)) if 0 <= i < max_len] - - action_l = (action or "n").lower().strip() - if action_l in ('y', 'yes', 'all'): - return "send_all", commands, None - - # Check for numeric selection (e.g., "1, 2-4") - if re.match(r'^[0-9,\-\s]+$', action_l): - selected_idxs = parse_indices(action_l, len(commands)) - if selected_idxs: - return "send_all", [commands[i] for i in selected_idxs], None - - elif action_l.startswith('e'): - # Check if it's a selective edit like 'e1-2' - selection_str = action_l[1:].strip() - if selection_str: - idxs = parse_indices(selection_str, len(commands)) - cmds_to_edit = [commands[i] for i in idxs] if idxs else commands - else: - cmds_to_edit = commands - - target = "\n".join(cmds_to_edit) - e_bindings = KeyBindings() - @e_bindings.add('c-j') - def _(ev): ev.app.exit(result=ev.app.current_buffer.text) - @e_bindings.add('escape', 'enter') - def _(ev): ev.app.exit(result=ev.app.current_buffer.text) - @e_bindings.add('escape') - def _(ev): 
ev.app.exit(result='') + # 3. Process Input via AIService + directive = self.ai_service.process_copilot_input(question, self.session_state) - edited = await confirm_session.prompt_async( - HTML("Edit (Ctrl+Enter or Esc+Enter to submit):\n"), - default=target, multiline=True, key_bindings=e_bindings - ) - if edited.strip(): - # Split by lines to ensure core.py applies delay between each command - lines = [l.strip() for l in edited.split('\n') if l.strip()] - return "custom", None, lines - return "cancel", None, None + if directive["action"] == "state_update": + state['toolbar_msg'] = directive['message'] + state['msg_expiry'] = time.time() + 3 # 3 seconds timeout + + async def delayed_refresh(): + await asyncio.sleep(3.1) + # Only invalidate if the message hasn't been replaced by a newer one + if state.get('toolbar_msg') == directive['message']: + state['toolbar_msg'] = '' # Explicitly clear + try: + from prompt_toolkit.application.current import get_app + app = get_app() + if app: app.invalidate() + except: pass + asyncio.create_task(delayed_refresh()) + + # Mover el cursor arriba y limpiar la lΓ­nea para que el nuevo prompt reemplace al anterior + sys.stdout.write('\x1b[1A\x1b[2K') + sys.stdout.flush() + continue + else: + # Limpiar el mensaje de la barra cuando se hace una pregunta real + state['toolbar_msg'] = '' + + clean_question = directive.get("clean_prompt", question) + overrides = directive.get("overrides", {}) + + # Merge node_info with session_state and overrides + merged_node_info = node_info.copy() + if self.session_state['os']: merged_node_info['os'] = self.session_state['os'] + if self.session_state['prompt']: merged_node_info['prompt'] = self.session_state['prompt'] + merged_node_info['persona'] = self.session_state['persona'] + merged_node_info['trust'] = self.session_state['trust_mode'] + merged_node_info['memories'] = list(self.session_state['memories']) + + for k, v in overrides.items(): + merged_node_info[k] = v + + # Enrich question + past = 
self.history.get_strings() + if len(past) > 1: + clean_past = [q for q in past[-6:-1] if not q.startswith('/')] + if clean_past: + history_text = "\n".join(f"- {q}" for q in clean_past) + clean_question = f"Previous questions:\n{history_text}\n\nCurrent Question:\n{clean_question}" + + # 3. AI Execution + # Use persona from overrides (one-shot) or from session state + active_persona = merged_node_info.get('persona', self.session_state.get('persona', 'engineer')) + persona_color = self._get_theme_color(active_persona, fallback="cyan") + + active_buffer = get_active_buffer() + live_text = "Thinking..." + panel = Panel(live_text, title=f"[bold {persona_color}]Copilot Guide[/bold {persona_color}]", border_style=persona_color) + + def on_chunk(text): + nonlocal live_text + if live_text == "Thinking...": live_text = "" + live_text += text + + with Live(panel, console=self.console, refresh_per_second=10) as live: + def update_live(t): + live.update(Panel(Markdown(t), title=f"[bold {persona_color}]Copilot Guide[/bold {persona_color}]", border_style=persona_color)) + + wrapped_chunk = lambda t: (on_chunk(t), update_live(live_text)) + + # Check for interruption during AI call + ai_task = asyncio.create_task(on_ai_call(active_buffer, clean_question, wrapped_chunk, merged_node_info)) + + try: + while not ai_task.done(): + await asyncio.sleep(0.05) + result = await ai_task + except asyncio.CancelledError: + return "cancel", None, None + + if not result or result.get("error"): + if result and result.get("error"): self.console.print(f"[red]Error: {result['error']}[/red]") + return "cancel", None, None + + # 4. Handle result + if live_text == "Thinking..." 
and result.get("guide"): + self.console.print(Panel(Markdown(result["guide"]), title=f"[bold {persona_color}]Copilot Guide[/bold {persona_color}]", border_style=persona_color)) + + commands = result.get("commands", []) + if not commands: + self.console.print("") + return "continue", None, None + + risk = result.get("risk_level", "low") + risk_style = {"low": "success", "high": "warning", "destructive": "error"}.get(risk, "success") + style_color = self._get_theme_color(risk_style, fallback="green") + + cmd_text = "\n".join(f" {i+1}. {c}" for i, c in enumerate(commands)) + # Explicitly use 'bold style_color' for both TITLE and BORDER to ensure maximum consistency + self.console.print(Panel(cmd_text, title=f"[bold {style_color}]Suggested Commands [{risk.upper()}][/bold {style_color}]", border_style=f"bold {style_color}")) + + if merged_node_info.get('trust', False) and risk != "destructive": + self.console.print(f"[dim]βš™οΈ Auto-executing (Trust Mode)[/dim]") + return "send_all", commands, None + + confirm_session = PromptSession(input=self.pt_input, output=self.pt_output) + c_bindings = KeyBindings() + @c_bindings.add('escape', eager=True) + @c_bindings.add('c-c') + def _(ev): ev.app.exit(result='n') + + import html + try: + p_text = html.escape(f"Send? 
(y/n/e/range) [n]: ") + # Use the EXACT same style_color and force bold="true" for Prompt-Toolkit + action = await confirm_session.prompt_async(HTML(f''), key_bindings=c_bindings) + except (KeyboardInterrupt, EOFError): + self.console.print("") + return "continue", None, None + + def parse_indices(text, max_len): + """Helper to parse '1-3, 5, 7' into [0, 1, 2, 4, 6].""" + indices = [] + # Replace commas with spaces and split + parts = text.replace(',', ' ').split() + for part in parts: + if '-' in part: + try: + start, end = map(int, part.split('-')) + # Ensure inclusive and 0-indexed + indices.extend(range(start-1, end)) + except: continue + elif part.isdigit(): + indices.append(int(part)-1) + # Filter valid indices and remove duplicates + return [i for i in sorted(set(indices)) if 0 <= i < max_len] + + action_l = (action or "n").lower().strip() + if action_l in ('y', 'yes', 'all'): + return "send_all", commands, None + + # Check for numeric selection (e.g., "1, 2-4") + if re.match(r'^[0-9,\-\s]+$', action_l): + selected_idxs = parse_indices(action_l, len(commands)) + if selected_idxs: + return "send_all", [commands[i] for i in selected_idxs], None + + elif action_l.startswith('e'): + # Check if it's a selective edit like 'e1-2' + selection_str = action_l[1:].strip() + if selection_str: + idxs = parse_indices(selection_str, len(commands)) + cmds_to_edit = [commands[i] for i in idxs] if idxs else commands + else: + cmds_to_edit = commands + + target = "\n".join(cmds_to_edit) + e_bindings = KeyBindings() + @e_bindings.add('c-j') + def _(ev): ev.app.exit(result=ev.app.current_buffer.text) + @e_bindings.add('escape', 'enter') + def _(ev): ev.app.exit(result=ev.app.current_buffer.text) + @e_bindings.add('escape') + def _(ev): ev.app.exit(result='') + + c_edit = self._get_theme_color("user_prompt", "cyan") + import html + e_text = html.escape("Edit (Ctrl+Enter or Esc+Enter to submit):\n") + try: + edited = await confirm_session.prompt_async( + HTML(f''), + 
default=target, multiline=True, key_bindings=e_bindings + ) + except (KeyboardInterrupt, EOFError): + self.console.print("") + return "continue", None, None + + if edited and edited.strip(): + # Split by lines to ensure core.py applies delay between each command + lines = [l.strip() for l in edited.split('\n') if l.strip()] + return "custom", None, lines + + self.console.print("") + return "continue", None, None return "cancel", None, None finally: + state['cancelled'] = True self.console.print("[dim]Returning to session...[/dim]") diff --git a/connpy/completion.py b/connpy/completion.py index 6de9e27..502b63d 100755 --- a/connpy/completion.py +++ b/connpy/completion.py @@ -169,11 +169,21 @@ def _build_tree(nodes, folders, profiles, plugins, configdir): } # State Machine Definitions + mcp_dict = { + "list": None, + "add": {"*": {"*": {"*": None}}}, # name url [os] + "remove": {"*": None}, + "enable": {"*": None}, + "disable": {"*": None}, + "--help": None, "-h": None + } + ai_dict = {"__exclude_used__": True, "--help": None, "-h": None} for opt in ["--engineer-model", "--engineer-api-key", "--architect-model", "--architect-api-key"]: ai_dict[opt] = {"*": ai_dict} # takes value, loops back for opt in ["--debug", "--trust", "--list", "--list-sessions", "--session", "--resume", "--delete", "--delete-session", "-y"]: ai_dict[opt] = ai_dict # takes no value, loops back + ai_dict["--mcp"] = mcp_dict ai_dict["*"] = ai_dict mv_state = {"__extra__": _nodes, "--help": None, "-h": None} diff --git a/connpy/connapp.py b/connpy/connapp.py index 25793be..41a3f8d 100755 --- a/connpy/connapp.py +++ b/connpy/connapp.py @@ -89,6 +89,10 @@ class connapp: if hasattr(self.services.nodes, "list_folders") and hasattr(self.services.nodes.list_folders, "register_post_hook"): self.services.nodes.list_folders.register_post_hook(self.services.context.filter_node_list) + # Apply theme from config if exists before remote connection attempts + user_theme = self.config.config.get("theme", {}) + 
self._apply_app_theme(user_theme) + # Populate data via services try: self.nodes_list = self.services.nodes.list_nodes() @@ -151,10 +155,6 @@ class connapp: return kwargs.get("result") configfile._saveconfig.register_post_hook(auto_sync_hook) - - # Apply theme from config if exists - user_theme = self.config.config.get("theme", {}) - self._apply_app_theme(user_theme) def _apply_app_theme(self, styles): """Unified method to apply theme to printer and help formatter.""" diff --git a/connpy/core.py b/connpy/core.py index ae9152a..c4dcad2 100755 --- a/connpy/core.py +++ b/connpy/core.py @@ -35,8 +35,6 @@ def copilot_terminal_mode(): new_settings[1] = new_settings[1] | termios.OPOST termios.tcsetattr(fd, termios.TCSANOW, new_settings) - yield - except Exception: yield finally: try: @@ -610,20 +608,24 @@ class node: async def handler(buffer, node_info, stream, child_fd, cmd_byte_positions=None): try: - interface = CopilotInterface(config, history=getattr(stream, 'copilot_history', None)) + interface = CopilotInterface( + config, + history=getattr(stream, 'copilot_history', None), + session_state=getattr(stream, 'copilot_state', None) + ) # Save history back to stream for persistence in current session stream.copilot_history = interface.history + stream.copilot_state = interface.session_state ai_service = AIService(config) - async def on_ai_call(active_buffer, question, chunk_callback): + async def on_ai_call(active_buffer, question, chunk_callback, merged_node_info): return await ai_service.aask_copilot( - active_buffer, - question, - node_info=node_info, + active_buffer, + question, + node_info=merged_node_info, chunk_callback=chunk_callback ) - # Get raw bytes from BytesIO raw_bytes = self.mylog.getvalue() @@ -637,12 +639,16 @@ class node: try: with copilot_terminal_mode(): - action, commands, custom_cmd = await interface.run_session( - raw_bytes=raw_bytes, - cmd_byte_positions=cmd_byte_positions, - node_info=node_info, - on_ai_call=on_ai_call - ) + while True: + 
action, commands, custom_cmd = await interface.run_session( + raw_bytes=raw_bytes, + cmd_byte_positions=cmd_byte_positions, + node_info=node_info, + on_ai_call=on_ai_call + ) + if action == "continue": + continue + break finally: # Reiniciar el lector de la terminal para volver al modo interactivo SSH/Telnet if hasattr(stream, 'start_reading'): diff --git a/connpy/grpc_layer/connpy_pb2.py b/connpy/grpc_layer/connpy_pb2.py index a443c02..2c33451 100644 --- a/connpy/grpc_layer/connpy_pb2.py +++ b/connpy/grpc_layer/connpy_pb2.py @@ -26,7 +26,7 @@ from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19\x63onnpy/proto/connpy.proto\x12\x06\x63onnpy\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1bgoogle/protobuf/empty.proto\"\xdc\x01\n\x0fInteractRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04sftp\x18\x02 \x01(\x08\x12\r\n\x05\x64\x65\x62ug\x18\x03 \x01(\x08\x12\x12\n\nstdin_data\x18\x04 \x01(\x0c\x12\x0c\n\x04\x63ols\x18\x05 \x01(\x05\x12\x0c\n\x04rows\x18\x06 \x01(\x05\x12\x1e\n\x16\x63onnection_params_json\x18\x07 \x01(\t\x12\x18\n\x10\x63opilot_question\x18\x08 \x01(\t\x12\x16\n\x0e\x63opilot_action\x18\t \x01(\t\x12\x1e\n\x16\x63opilot_context_buffer\x18\n \x01(\t\"\x86\x02\n\x10InteractResponse\x12\x13\n\x0bstdout_data\x18\x01 \x01(\x0c\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x15\n\rerror_message\x18\x03 \x01(\t\x12\x16\n\x0e\x63opilot_prompt\x18\x04 \x01(\x08\x12\x1e\n\x16\x63opilot_buffer_preview\x18\x05 \x01(\t\x12\x1d\n\x15\x63opilot_response_json\x18\x06 \x01(\t\x12\x1e\n\x16\x63opilot_node_info_json\x18\x07 \x01(\t\x12\x1c\n\x14\x63opilot_stream_chunk\x18\x08 \x01(\t\x12 \n\x18\x63opilot_injected_command\x18\t \x01(\t\"7\n\rFilterRequest\x12\x12\n\nfilter_str\x18\x01 \x01(\t\x12\x12\n\nformat_str\x18\x02 \x01(\t\"5\n\rValueResponse\x12$\n\x04\x64\x61ta\x18\x01 
\x01(\x0b\x32\x16.google.protobuf.Value\"\x17\n\tIdRequest\x12\n\n\x02id\x18\x01 \x01(\t\"S\n\x0bNodeRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12%\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x11\n\tis_folder\x18\x03 \x01(\x08\".\n\rDeleteRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\tis_folder\x18\x02 \x01(\x08\"\x1d\n\x0cMessageValue\x12\r\n\x05value\x18\x01 \x01(\t\";\n\x0bMoveRequest\x12\x0e\n\x06src_id\x18\x01 \x01(\t\x12\x0e\n\x06\x64st_id\x18\x02 \x01(\t\x12\x0c\n\x04\x63opy\x18\x03 \x01(\x08\"W\n\x0b\x42ulkRequest\x12\x0b\n\x03ids\x18\x01 \x03(\t\x12\r\n\x05hosts\x18\x02 \x03(\t\x12,\n\x0b\x63ommon_data\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\"7\n\x0eStructResponse\x12%\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\"/\n\x0eProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07resolve\x18\x02 \x01(\x08\"6\n\rStructRequest\x12%\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\"\x1e\n\rStringRequest\x12\r\n\x05value\x18\x01 \x01(\t\"\x1f\n\x0eStringResponse\x12\r\n\x05value\x18\x01 \x01(\t\"C\n\rUpdateRequest\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value\"B\n\rPluginRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0bsource_file\x18\x02 \x01(\t\x12\x0e\n\x06update\x18\x03 \x01(\x08\"\xa5\x01\n\nRunRequest\x12\r\n\x05nodes\x18\x01 \x03(\t\x12\x10\n\x08\x63ommands\x18\x02 \x03(\t\x12\x0e\n\x06\x66older\x18\x03 \x01(\t\x12\x0e\n\x06prompt\x18\x04 \x01(\t\x12\x10\n\x08parallel\x18\x05 \x01(\x05\x12%\n\x04vars\x18\x06 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x0f\n\x07timeout\x18\x07 \x01(\x05\x12\x0c\n\x04name\x18\x08 \x01(\t\"\xb8\x01\n\x0bTestRequest\x12\r\n\x05nodes\x18\x01 \x03(\t\x12\x10\n\x08\x63ommands\x18\x02 \x03(\t\x12\x10\n\x08\x65xpected\x18\x03 \x03(\t\x12\x0e\n\x06\x66older\x18\x04 \x01(\t\x12\x0e\n\x06prompt\x18\x05 \x01(\t\x12\x10\n\x08parallel\x18\x06 \x01(\x05\x12%\n\x04vars\x18\x07 
\x01(\x0b\x32\x17.google.protobuf.Struct\x12\x0f\n\x07timeout\x18\x08 \x01(\x05\x12\x0c\n\x04name\x18\t \x01(\t\"A\n\rScriptRequest\x12\x0e\n\x06param1\x18\x01 \x01(\t\x12\x0e\n\x06param2\x18\x02 \x01(\t\x12\x10\n\x08parallel\x18\x03 \x01(\x05\"3\n\rExportRequest\x12\x11\n\tfile_path\x18\x01 \x01(\t\x12\x0f\n\x07\x66olders\x18\x02 \x03(\t\"\x1c\n\x0bListRequest\x12\r\n\x05items\x18\x01 \x03(\t\"\xa6\x02\n\nAskRequest\x12\x12\n\ninput_text\x18\x01 \x01(\t\x12\x0e\n\x06\x64ryrun\x18\x02 \x01(\x08\x12,\n\x0c\x63hat_history\x18\x03 \x01(\x0b\x32\x16.google.protobuf.Value\x12\x12\n\nsession_id\x18\x04 \x01(\t\x12\r\n\x05\x64\x65\x62ug\x18\x05 \x01(\x08\x12\x16\n\x0e\x65ngineer_model\x18\x06 \x01(\t\x12\x18\n\x10\x65ngineer_api_key\x18\x07 \x01(\t\x12\x17\n\x0f\x61rchitect_model\x18\x08 \x01(\t\x12\x19\n\x11\x61rchitect_api_key\x18\t \x01(\t\x12\r\n\x05trust\x18\n \x01(\x08\x12\x1b\n\x13\x63onfirmation_answer\x18\x0b \x01(\t\x12\x11\n\tinterrupt\x18\x0c \x01(\x08\"\xc8\x01\n\nAIResponse\x12\x12\n\ntext_chunk\x18\x01 \x01(\t\x12\x10\n\x08is_final\x18\x02 \x01(\x08\x12,\n\x0b\x66ull_result\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x15\n\rstatus_update\x18\x04 \x01(\t\x12\x15\n\rdebug_message\x18\x05 \x01(\t\x12\x1d\n\x15requires_confirmation\x18\x06 \x01(\x08\x12\x19\n\x11important_message\x18\x07 \x01(\t\"\x1d\n\x0c\x42oolResponse\x12\r\n\x05value\x18\x01 \x01(\x08\"C\n\x0fProviderRequest\x12\x10\n\x08provider\x18\x01 \x01(\t\x12\r\n\x05model\x18\x02 \x01(\t\x12\x0f\n\x07\x61pi_key\x18\x03 \x01(\t\"\x1b\n\nIntRequest\x12\r\n\x05value\x18\x01 \x01(\x05\"p\n\rNodeRunResult\x12\x11\n\tunique_id\x18\x01 \x01(\t\x12\x0e\n\x06output\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\x05\x12,\n\x0btest_result\x18\x04 \x01(\x0b\x32\x17.google.protobuf.Struct\"m\n\x12\x46ullReplaceRequest\x12,\n\x0b\x63onnections\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12)\n\x08profiles\x18\x02 
\x01(\x0b\x32\x17.google.protobuf.Struct\"X\n\x0e\x43opilotRequest\x12\x17\n\x0fterminal_buffer\x18\x01 \x01(\t\x12\x15\n\ruser_question\x18\x02 \x01(\t\x12\x16\n\x0enode_info_json\x18\x03 \x01(\t\"U\n\x0f\x43opilotResponse\x12\x10\n\x08\x63ommands\x18\x01 \x03(\t\x12\r\n\x05guide\x18\x02 \x01(\t\x12\x12\n\nrisk_level\x18\x03 \x01(\t\x12\r\n\x05\x65rror\x18\x04 \x01(\t\"a\n\nMCPRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0b\n\x03url\x18\x02 \x01(\t\x12\x0f\n\x07\x65nabled\x18\x03 \x01(\x08\x12\x17\n\x0f\x61uto_load_on_os\x18\x04 \x01(\t\x12\x0e\n\x06remove\x18\x05 \x01(\x08\x32\xe1\x07\n\x0bNodeService\x12<\n\nlist_nodes\x12\x15.connpy.FilterRequest\x1a\x15.connpy.ValueResponse\"\x00\x12>\n\x0clist_folders\x12\x15.connpy.FilterRequest\x1a\x15.connpy.ValueResponse\"\x00\x12?\n\x10get_node_details\x12\x11.connpy.IdRequest\x1a\x16.connpy.StructResponse\"\x00\x12<\n\x0e\x65xplode_unique\x12\x11.connpy.IdRequest\x1a\x15.connpy.ValueResponse\"\x00\x12\x42\n\x0egenerate_cache\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\"\x00\x12\x39\n\x08\x61\x64\x64_node\x12\x13.connpy.NodeRequest\x1a\x16.google.protobuf.Empty\"\x00\x12<\n\x0bupdate_node\x12\x13.connpy.NodeRequest\x1a\x16.google.protobuf.Empty\"\x00\x12>\n\x0b\x64\x65lete_node\x12\x15.connpy.DeleteRequest\x1a\x16.google.protobuf.Empty\"\x00\x12:\n\tmove_node\x12\x13.connpy.MoveRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x39\n\x08\x62ulk_add\x12\x13.connpy.BulkRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x45\n\x16validate_parent_folder\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\x12set_reserved_names\x12\x13.connpy.ListRequest\x1a\x16.google.protobuf.Empty\"\x00\x12H\n\rinteract_node\x12\x17.connpy.InteractRequest\x1a\x18.connpy.InteractResponse\"\x00(\x01\x30\x01\x12\x44\n\x0c\x66ull_replace\x12\x1a.connpy.FullReplaceRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x45\n\rget_inventory\x12\x16.google.protobuf.Empty\x1a\x1a.connpy.FullReplaceRequest\"\x00\x32\x96\x03\n\x0
eProfileService\x12?\n\rlist_profiles\x12\x15.connpy.FilterRequest\x1a\x15.connpy.ValueResponse\"\x00\x12?\n\x0bget_profile\x12\x16.connpy.ProfileRequest\x1a\x16.connpy.StructResponse\"\x00\x12<\n\x0b\x61\x64\x64_profile\x12\x13.connpy.NodeRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x44\n\x11resolve_node_data\x12\x15.connpy.StructRequest\x1a\x16.connpy.StructResponse\"\x00\x12=\n\x0e\x64\x65lete_profile\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x12?\n\x0eupdate_profile\x12\x13.connpy.NodeRequest\x1a\x16.google.protobuf.Empty\"\x00\x32\xae\x03\n\rConfigService\x12@\n\x0cget_settings\x12\x16.google.protobuf.Empty\x1a\x16.connpy.StructResponse\"\x00\x12\x43\n\x0fget_default_dir\x12\x16.google.protobuf.Empty\x1a\x16.connpy.StringResponse\"\x00\x12\x44\n\x11set_config_folder\x12\x15.connpy.StringRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x41\n\x0eupdate_setting\x12\x15.connpy.UpdateRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\x10\x65ncrypt_password\x12\x15.connpy.StringRequest\x1a\x16.connpy.StringResponse\"\x00\x12H\n\x15\x61pply_theme_from_file\x12\x15.connpy.StringRequest\x1a\x16.connpy.StructResponse\"\x00\x32\xca\x02\n\rPluginService\x12?\n\x0clist_plugins\x12\x16.google.protobuf.Empty\x1a\x15.connpy.ValueResponse\"\x00\x12=\n\nadd_plugin\x12\x15.connpy.PluginRequest\x1a\x16.google.protobuf.Empty\"\x00\x12<\n\rdelete_plugin\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x12<\n\renable_plugin\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x12=\n\x0e\x64isable_plugin\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x32\x9b\x02\n\x10\x45xecutionService\x12=\n\x0crun_commands\x12\x12.connpy.RunRequest\x1a\x15.connpy.NodeRunResult\"\x00\x30\x01\x12?\n\rtest_commands\x12\x13.connpy.TestRequest\x1a\x15.connpy.NodeRunResult\"\x00\x30\x01\x12\x41\n\x0erun_cli_script\x12\x15.connpy.ScriptRequest\x1a\x16.connpy.StructResponse\"\x00\x12\x44\n\x11run_yaml_playbook\x12\x15.connpy.ScriptRequest\x1a\x16.co
nnpy.StructResponse\"\x00\x32\xe2\x01\n\x13ImportExportService\x12\x41\n\x0e\x65xport_to_file\x12\x15.connpy.ExportRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\x10import_from_file\x12\x15.connpy.StringRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\x12set_reserved_names\x12\x13.connpy.ListRequest\x1a\x16.google.protobuf.Empty\"\x00\x32\x8f\x04\n\tAIService\x12\x33\n\x03\x61sk\x12\x12.connpy.AskRequest\x1a\x12.connpy.AIResponse\"\x00(\x01\x30\x01\x12\x38\n\x07\x63onfirm\x12\x15.connpy.StringRequest\x1a\x14.connpy.BoolResponse\"\x00\x12@\n\x0b\x61sk_copilot\x12\x16.connpy.CopilotRequest\x1a\x17.connpy.CopilotResponse\"\x00\x12@\n\rlist_sessions\x12\x16.google.protobuf.Empty\x1a\x15.connpy.ValueResponse\"\x00\x12\x41\n\x0e\x64\x65lete_session\x12\x15.connpy.StringRequest\x1a\x16.google.protobuf.Empty\"\x00\x12G\n\x12\x63onfigure_provider\x12\x17.connpy.ProviderRequest\x1a\x16.google.protobuf.Empty\"\x00\x12=\n\rconfigure_mcp\x12\x12.connpy.MCPRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x44\n\x11load_session_data\x12\x15.connpy.StringRequest\x1a\x16.connpy.StructResponse\"\x00\x32\xc2\x02\n\rSystemService\x12\x39\n\tstart_api\x12\x12.connpy.IntRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x39\n\tdebug_api\x12\x12.connpy.IntRequest\x1a\x16.google.protobuf.Empty\"\x00\x12<\n\x08stop_api\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\"\x00\x12;\n\x0brestart_api\x12\x12.connpy.IntRequest\x1a\x16.google.protobuf.Empty\"\x00\x12@\n\x0eget_api_status\x12\x16.google.protobuf.Empty\x1a\x14.connpy.BoolResponse\"\x00\x62\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19\x63onnpy/proto/connpy.proto\x12\x06\x63onnpy\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1bgoogle/protobuf/empty.proto\"\xfc\x01\n\x0fInteractRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04sftp\x18\x02 \x01(\x08\x12\r\n\x05\x64\x65\x62ug\x18\x03 \x01(\x08\x12\x12\n\nstdin_data\x18\x04 \x01(\x0c\x12\x0c\n\x04\x63ols\x18\x05 
\x01(\x05\x12\x0c\n\x04rows\x18\x06 \x01(\x05\x12\x1e\n\x16\x63onnection_params_json\x18\x07 \x01(\t\x12\x18\n\x10\x63opilot_question\x18\x08 \x01(\t\x12\x16\n\x0e\x63opilot_action\x18\t \x01(\t\x12\x1e\n\x16\x63opilot_context_buffer\x18\n \x01(\t\x12\x1e\n\x16\x63opilot_node_info_json\x18\r \x01(\t\"\x86\x02\n\x10InteractResponse\x12\x13\n\x0bstdout_data\x18\x01 \x01(\x0c\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x15\n\rerror_message\x18\x03 \x01(\t\x12\x16\n\x0e\x63opilot_prompt\x18\x04 \x01(\x08\x12\x1e\n\x16\x63opilot_buffer_preview\x18\x05 \x01(\t\x12\x1d\n\x15\x63opilot_response_json\x18\x06 \x01(\t\x12\x1e\n\x16\x63opilot_node_info_json\x18\x07 \x01(\t\x12\x1c\n\x14\x63opilot_stream_chunk\x18\x08 \x01(\t\x12 \n\x18\x63opilot_injected_command\x18\t \x01(\t\"7\n\rFilterRequest\x12\x12\n\nfilter_str\x18\x01 \x01(\t\x12\x12\n\nformat_str\x18\x02 \x01(\t\"5\n\rValueResponse\x12$\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x16.google.protobuf.Value\"\x17\n\tIdRequest\x12\n\n\x02id\x18\x01 \x01(\t\"S\n\x0bNodeRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12%\n\x04\x64\x61ta\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x11\n\tis_folder\x18\x03 \x01(\x08\".\n\rDeleteRequest\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\tis_folder\x18\x02 \x01(\x08\"\x1d\n\x0cMessageValue\x12\r\n\x05value\x18\x01 \x01(\t\";\n\x0bMoveRequest\x12\x0e\n\x06src_id\x18\x01 \x01(\t\x12\x0e\n\x06\x64st_id\x18\x02 \x01(\t\x12\x0c\n\x04\x63opy\x18\x03 \x01(\x08\"W\n\x0b\x42ulkRequest\x12\x0b\n\x03ids\x18\x01 \x03(\t\x12\r\n\x05hosts\x18\x02 \x03(\t\x12,\n\x0b\x63ommon_data\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\"7\n\x0eStructResponse\x12%\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\"/\n\x0eProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07resolve\x18\x02 \x01(\x08\"6\n\rStructRequest\x12%\n\x04\x64\x61ta\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\"\x1e\n\rStringRequest\x12\r\n\x05value\x18\x01 \x01(\t\"\x1f\n\x0eStringResponse\x12\r\n\x05value\x18\x01 
\x01(\t\"C\n\rUpdateRequest\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value\"B\n\rPluginRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0bsource_file\x18\x02 \x01(\t\x12\x0e\n\x06update\x18\x03 \x01(\x08\"\xa5\x01\n\nRunRequest\x12\r\n\x05nodes\x18\x01 \x03(\t\x12\x10\n\x08\x63ommands\x18\x02 \x03(\t\x12\x0e\n\x06\x66older\x18\x03 \x01(\t\x12\x0e\n\x06prompt\x18\x04 \x01(\t\x12\x10\n\x08parallel\x18\x05 \x01(\x05\x12%\n\x04vars\x18\x06 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x0f\n\x07timeout\x18\x07 \x01(\x05\x12\x0c\n\x04name\x18\x08 \x01(\t\"\xb8\x01\n\x0bTestRequest\x12\r\n\x05nodes\x18\x01 \x03(\t\x12\x10\n\x08\x63ommands\x18\x02 \x03(\t\x12\x10\n\x08\x65xpected\x18\x03 \x03(\t\x12\x0e\n\x06\x66older\x18\x04 \x01(\t\x12\x0e\n\x06prompt\x18\x05 \x01(\t\x12\x10\n\x08parallel\x18\x06 \x01(\x05\x12%\n\x04vars\x18\x07 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x0f\n\x07timeout\x18\x08 \x01(\x05\x12\x0c\n\x04name\x18\t \x01(\t\"A\n\rScriptRequest\x12\x0e\n\x06param1\x18\x01 \x01(\t\x12\x0e\n\x06param2\x18\x02 \x01(\t\x12\x10\n\x08parallel\x18\x03 \x01(\x05\"3\n\rExportRequest\x12\x11\n\tfile_path\x18\x01 \x01(\t\x12\x0f\n\x07\x66olders\x18\x02 \x03(\t\"\x1c\n\x0bListRequest\x12\r\n\x05items\x18\x01 \x03(\t\"\xa6\x02\n\nAskRequest\x12\x12\n\ninput_text\x18\x01 \x01(\t\x12\x0e\n\x06\x64ryrun\x18\x02 \x01(\x08\x12,\n\x0c\x63hat_history\x18\x03 \x01(\x0b\x32\x16.google.protobuf.Value\x12\x12\n\nsession_id\x18\x04 \x01(\t\x12\r\n\x05\x64\x65\x62ug\x18\x05 \x01(\x08\x12\x16\n\x0e\x65ngineer_model\x18\x06 \x01(\t\x12\x18\n\x10\x65ngineer_api_key\x18\x07 \x01(\t\x12\x17\n\x0f\x61rchitect_model\x18\x08 \x01(\t\x12\x19\n\x11\x61rchitect_api_key\x18\t \x01(\t\x12\r\n\x05trust\x18\n \x01(\x08\x12\x1b\n\x13\x63onfirmation_answer\x18\x0b \x01(\t\x12\x11\n\tinterrupt\x18\x0c \x01(\x08\"\xc8\x01\n\nAIResponse\x12\x12\n\ntext_chunk\x18\x01 \x01(\t\x12\x10\n\x08is_final\x18\x02 \x01(\x08\x12,\n\x0b\x66ull_result\x18\x03 
\x01(\x0b\x32\x17.google.protobuf.Struct\x12\x15\n\rstatus_update\x18\x04 \x01(\t\x12\x15\n\rdebug_message\x18\x05 \x01(\t\x12\x1d\n\x15requires_confirmation\x18\x06 \x01(\x08\x12\x19\n\x11important_message\x18\x07 \x01(\t\"\x1d\n\x0c\x42oolResponse\x12\r\n\x05value\x18\x01 \x01(\x08\"C\n\x0fProviderRequest\x12\x10\n\x08provider\x18\x01 \x01(\t\x12\r\n\x05model\x18\x02 \x01(\t\x12\x0f\n\x07\x61pi_key\x18\x03 \x01(\t\"\x1b\n\nIntRequest\x12\r\n\x05value\x18\x01 \x01(\x05\"p\n\rNodeRunResult\x12\x11\n\tunique_id\x18\x01 \x01(\t\x12\x0e\n\x06output\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\x05\x12,\n\x0btest_result\x18\x04 \x01(\x0b\x32\x17.google.protobuf.Struct\"m\n\x12\x46ullReplaceRequest\x12,\n\x0b\x63onnections\x18\x01 \x01(\x0b\x32\x17.google.protobuf.Struct\x12)\n\x08profiles\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"X\n\x0e\x43opilotRequest\x12\x17\n\x0fterminal_buffer\x18\x01 \x01(\t\x12\x15\n\ruser_question\x18\x02 \x01(\t\x12\x16\n\x0enode_info_json\x18\x03 \x01(\t\"U\n\x0f\x43opilotResponse\x12\x10\n\x08\x63ommands\x18\x01 \x03(\t\x12\r\n\x05guide\x18\x02 \x01(\t\x12\x12\n\nrisk_level\x18\x03 \x01(\t\x12\r\n\x05\x65rror\x18\x04 \x01(\t\"a\n\nMCPRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0b\n\x03url\x18\x02 \x01(\t\x12\x0f\n\x07\x65nabled\x18\x03 \x01(\x08\x12\x17\n\x0f\x61uto_load_on_os\x18\x04 \x01(\t\x12\x0e\n\x06remove\x18\x05 
\x01(\x08\x32\xe1\x07\n\x0bNodeService\x12<\n\nlist_nodes\x12\x15.connpy.FilterRequest\x1a\x15.connpy.ValueResponse\"\x00\x12>\n\x0clist_folders\x12\x15.connpy.FilterRequest\x1a\x15.connpy.ValueResponse\"\x00\x12?\n\x10get_node_details\x12\x11.connpy.IdRequest\x1a\x16.connpy.StructResponse\"\x00\x12<\n\x0e\x65xplode_unique\x12\x11.connpy.IdRequest\x1a\x15.connpy.ValueResponse\"\x00\x12\x42\n\x0egenerate_cache\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\"\x00\x12\x39\n\x08\x61\x64\x64_node\x12\x13.connpy.NodeRequest\x1a\x16.google.protobuf.Empty\"\x00\x12<\n\x0bupdate_node\x12\x13.connpy.NodeRequest\x1a\x16.google.protobuf.Empty\"\x00\x12>\n\x0b\x64\x65lete_node\x12\x15.connpy.DeleteRequest\x1a\x16.google.protobuf.Empty\"\x00\x12:\n\tmove_node\x12\x13.connpy.MoveRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x39\n\x08\x62ulk_add\x12\x13.connpy.BulkRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x45\n\x16validate_parent_folder\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\x12set_reserved_names\x12\x13.connpy.ListRequest\x1a\x16.google.protobuf.Empty\"\x00\x12H\n\rinteract_node\x12\x17.connpy.InteractRequest\x1a\x18.connpy.InteractResponse\"\x00(\x01\x30\x01\x12\x44\n\x0c\x66ull_replace\x12\x1a.connpy.FullReplaceRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x45\n\rget_inventory\x12\x16.google.protobuf.Empty\x1a\x1a.connpy.FullReplaceRequest\"\x00\x32\x96\x03\n\x0eProfileService\x12?\n\rlist_profiles\x12\x15.connpy.FilterRequest\x1a\x15.connpy.ValueResponse\"\x00\x12?\n\x0bget_profile\x12\x16.connpy.ProfileRequest\x1a\x16.connpy.StructResponse\"\x00\x12<\n\x0b\x61\x64\x64_profile\x12\x13.connpy.NodeRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x44\n\x11resolve_node_data\x12\x15.connpy.StructRequest\x1a\x16.connpy.StructResponse\"\x00\x12=\n\x0e\x64\x65lete_profile\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x12?\n\x0eupdate_profile\x12\x13.connpy.NodeRequest\x1a\x16.google.protobuf.Empty\"\x00\x32\xae\x03\n\
rConfigService\x12@\n\x0cget_settings\x12\x16.google.protobuf.Empty\x1a\x16.connpy.StructResponse\"\x00\x12\x43\n\x0fget_default_dir\x12\x16.google.protobuf.Empty\x1a\x16.connpy.StringResponse\"\x00\x12\x44\n\x11set_config_folder\x12\x15.connpy.StringRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x41\n\x0eupdate_setting\x12\x15.connpy.UpdateRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\x10\x65ncrypt_password\x12\x15.connpy.StringRequest\x1a\x16.connpy.StringResponse\"\x00\x12H\n\x15\x61pply_theme_from_file\x12\x15.connpy.StringRequest\x1a\x16.connpy.StructResponse\"\x00\x32\xca\x02\n\rPluginService\x12?\n\x0clist_plugins\x12\x16.google.protobuf.Empty\x1a\x15.connpy.ValueResponse\"\x00\x12=\n\nadd_plugin\x12\x15.connpy.PluginRequest\x1a\x16.google.protobuf.Empty\"\x00\x12<\n\rdelete_plugin\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x12<\n\renable_plugin\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x12=\n\x0e\x64isable_plugin\x12\x11.connpy.IdRequest\x1a\x16.google.protobuf.Empty\"\x00\x32\x9b\x02\n\x10\x45xecutionService\x12=\n\x0crun_commands\x12\x12.connpy.RunRequest\x1a\x15.connpy.NodeRunResult\"\x00\x30\x01\x12?\n\rtest_commands\x12\x13.connpy.TestRequest\x1a\x15.connpy.NodeRunResult\"\x00\x30\x01\x12\x41\n\x0erun_cli_script\x12\x15.connpy.ScriptRequest\x1a\x16.connpy.StructResponse\"\x00\x12\x44\n\x11run_yaml_playbook\x12\x15.connpy.ScriptRequest\x1a\x16.connpy.StructResponse\"\x00\x32\xe2\x01\n\x13ImportExportService\x12\x41\n\x0e\x65xport_to_file\x12\x15.connpy.ExportRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\x10import_from_file\x12\x15.connpy.StringRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x43\n\x12set_reserved_names\x12\x13.connpy.ListRequest\x1a\x16.google.protobuf.Empty\"\x00\x32\x8f\x04\n\tAIService\x12\x33\n\x03\x61sk\x12\x12.connpy.AskRequest\x1a\x12.connpy.AIResponse\"\x00(\x01\x30\x01\x12\x38\n\x07\x63onfirm\x12\x15.connpy.StringRequest\x1a\x14.connpy.BoolResponse\"\x00\x12@\n\x0b\x61sk_copil
ot\x12\x16.connpy.CopilotRequest\x1a\x17.connpy.CopilotResponse\"\x00\x12@\n\rlist_sessions\x12\x16.google.protobuf.Empty\x1a\x15.connpy.ValueResponse\"\x00\x12\x41\n\x0e\x64\x65lete_session\x12\x15.connpy.StringRequest\x1a\x16.google.protobuf.Empty\"\x00\x12G\n\x12\x63onfigure_provider\x12\x17.connpy.ProviderRequest\x1a\x16.google.protobuf.Empty\"\x00\x12=\n\rconfigure_mcp\x12\x12.connpy.MCPRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x44\n\x11load_session_data\x12\x15.connpy.StringRequest\x1a\x16.connpy.StructResponse\"\x00\x32\xc2\x02\n\rSystemService\x12\x39\n\tstart_api\x12\x12.connpy.IntRequest\x1a\x16.google.protobuf.Empty\"\x00\x12\x39\n\tdebug_api\x12\x12.connpy.IntRequest\x1a\x16.google.protobuf.Empty\"\x00\x12<\n\x08stop_api\x12\x16.google.protobuf.Empty\x1a\x16.google.protobuf.Empty\"\x00\x12;\n\x0brestart_api\x12\x12.connpy.IntRequest\x1a\x16.google.protobuf.Empty\"\x00\x12@\n\x0eget_api_status\x12\x16.google.protobuf.Empty\x1a\x14.connpy.BoolResponse\"\x00\x62\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -34,83 +34,83 @@ _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'connpy.proto.connpy_pb2', _ if not _descriptor._USE_C_DESCRIPTORS: DESCRIPTOR._loaded_options = None _globals['_INTERACTREQUEST']._serialized_start=97 - _globals['_INTERACTREQUEST']._serialized_end=317 - _globals['_INTERACTRESPONSE']._serialized_start=320 - _globals['_INTERACTRESPONSE']._serialized_end=582 - _globals['_FILTERREQUEST']._serialized_start=584 - _globals['_FILTERREQUEST']._serialized_end=639 - _globals['_VALUERESPONSE']._serialized_start=641 - _globals['_VALUERESPONSE']._serialized_end=694 - _globals['_IDREQUEST']._serialized_start=696 - _globals['_IDREQUEST']._serialized_end=719 - _globals['_NODEREQUEST']._serialized_start=721 - _globals['_NODEREQUEST']._serialized_end=804 - _globals['_DELETEREQUEST']._serialized_start=806 - _globals['_DELETEREQUEST']._serialized_end=852 - 
_globals['_MESSAGEVALUE']._serialized_start=854 - _globals['_MESSAGEVALUE']._serialized_end=883 - _globals['_MOVEREQUEST']._serialized_start=885 - _globals['_MOVEREQUEST']._serialized_end=944 - _globals['_BULKREQUEST']._serialized_start=946 - _globals['_BULKREQUEST']._serialized_end=1033 - _globals['_STRUCTRESPONSE']._serialized_start=1035 - _globals['_STRUCTRESPONSE']._serialized_end=1090 - _globals['_PROFILEREQUEST']._serialized_start=1092 - _globals['_PROFILEREQUEST']._serialized_end=1139 - _globals['_STRUCTREQUEST']._serialized_start=1141 - _globals['_STRUCTREQUEST']._serialized_end=1195 - _globals['_STRINGREQUEST']._serialized_start=1197 - _globals['_STRINGREQUEST']._serialized_end=1227 - _globals['_STRINGRESPONSE']._serialized_start=1229 - _globals['_STRINGRESPONSE']._serialized_end=1260 - _globals['_UPDATEREQUEST']._serialized_start=1262 - _globals['_UPDATEREQUEST']._serialized_end=1329 - _globals['_PLUGINREQUEST']._serialized_start=1331 - _globals['_PLUGINREQUEST']._serialized_end=1397 - _globals['_RUNREQUEST']._serialized_start=1400 - _globals['_RUNREQUEST']._serialized_end=1565 - _globals['_TESTREQUEST']._serialized_start=1568 - _globals['_TESTREQUEST']._serialized_end=1752 - _globals['_SCRIPTREQUEST']._serialized_start=1754 - _globals['_SCRIPTREQUEST']._serialized_end=1819 - _globals['_EXPORTREQUEST']._serialized_start=1821 - _globals['_EXPORTREQUEST']._serialized_end=1872 - _globals['_LISTREQUEST']._serialized_start=1874 - _globals['_LISTREQUEST']._serialized_end=1902 - _globals['_ASKREQUEST']._serialized_start=1905 - _globals['_ASKREQUEST']._serialized_end=2199 - _globals['_AIRESPONSE']._serialized_start=2202 - _globals['_AIRESPONSE']._serialized_end=2402 - _globals['_BOOLRESPONSE']._serialized_start=2404 - _globals['_BOOLRESPONSE']._serialized_end=2433 - _globals['_PROVIDERREQUEST']._serialized_start=2435 - _globals['_PROVIDERREQUEST']._serialized_end=2502 - _globals['_INTREQUEST']._serialized_start=2504 - _globals['_INTREQUEST']._serialized_end=2531 
- _globals['_NODERUNRESULT']._serialized_start=2533 - _globals['_NODERUNRESULT']._serialized_end=2645 - _globals['_FULLREPLACEREQUEST']._serialized_start=2647 - _globals['_FULLREPLACEREQUEST']._serialized_end=2756 - _globals['_COPILOTREQUEST']._serialized_start=2758 - _globals['_COPILOTREQUEST']._serialized_end=2846 - _globals['_COPILOTRESPONSE']._serialized_start=2848 - _globals['_COPILOTRESPONSE']._serialized_end=2933 - _globals['_MCPREQUEST']._serialized_start=2935 - _globals['_MCPREQUEST']._serialized_end=3032 - _globals['_NODESERVICE']._serialized_start=3035 - _globals['_NODESERVICE']._serialized_end=4028 - _globals['_PROFILESERVICE']._serialized_start=4031 - _globals['_PROFILESERVICE']._serialized_end=4437 - _globals['_CONFIGSERVICE']._serialized_start=4440 - _globals['_CONFIGSERVICE']._serialized_end=4870 - _globals['_PLUGINSERVICE']._serialized_start=4873 - _globals['_PLUGINSERVICE']._serialized_end=5203 - _globals['_EXECUTIONSERVICE']._serialized_start=5206 - _globals['_EXECUTIONSERVICE']._serialized_end=5489 - _globals['_IMPORTEXPORTSERVICE']._serialized_start=5492 - _globals['_IMPORTEXPORTSERVICE']._serialized_end=5718 - _globals['_AISERVICE']._serialized_start=5721 - _globals['_AISERVICE']._serialized_end=6248 - _globals['_SYSTEMSERVICE']._serialized_start=6251 - _globals['_SYSTEMSERVICE']._serialized_end=6573 + _globals['_INTERACTREQUEST']._serialized_end=349 + _globals['_INTERACTRESPONSE']._serialized_start=352 + _globals['_INTERACTRESPONSE']._serialized_end=614 + _globals['_FILTERREQUEST']._serialized_start=616 + _globals['_FILTERREQUEST']._serialized_end=671 + _globals['_VALUERESPONSE']._serialized_start=673 + _globals['_VALUERESPONSE']._serialized_end=726 + _globals['_IDREQUEST']._serialized_start=728 + _globals['_IDREQUEST']._serialized_end=751 + _globals['_NODEREQUEST']._serialized_start=753 + _globals['_NODEREQUEST']._serialized_end=836 + _globals['_DELETEREQUEST']._serialized_start=838 + _globals['_DELETEREQUEST']._serialized_end=884 + 
_globals['_MESSAGEVALUE']._serialized_start=886 + _globals['_MESSAGEVALUE']._serialized_end=915 + _globals['_MOVEREQUEST']._serialized_start=917 + _globals['_MOVEREQUEST']._serialized_end=976 + _globals['_BULKREQUEST']._serialized_start=978 + _globals['_BULKREQUEST']._serialized_end=1065 + _globals['_STRUCTRESPONSE']._serialized_start=1067 + _globals['_STRUCTRESPONSE']._serialized_end=1122 + _globals['_PROFILEREQUEST']._serialized_start=1124 + _globals['_PROFILEREQUEST']._serialized_end=1171 + _globals['_STRUCTREQUEST']._serialized_start=1173 + _globals['_STRUCTREQUEST']._serialized_end=1227 + _globals['_STRINGREQUEST']._serialized_start=1229 + _globals['_STRINGREQUEST']._serialized_end=1259 + _globals['_STRINGRESPONSE']._serialized_start=1261 + _globals['_STRINGRESPONSE']._serialized_end=1292 + _globals['_UPDATEREQUEST']._serialized_start=1294 + _globals['_UPDATEREQUEST']._serialized_end=1361 + _globals['_PLUGINREQUEST']._serialized_start=1363 + _globals['_PLUGINREQUEST']._serialized_end=1429 + _globals['_RUNREQUEST']._serialized_start=1432 + _globals['_RUNREQUEST']._serialized_end=1597 + _globals['_TESTREQUEST']._serialized_start=1600 + _globals['_TESTREQUEST']._serialized_end=1784 + _globals['_SCRIPTREQUEST']._serialized_start=1786 + _globals['_SCRIPTREQUEST']._serialized_end=1851 + _globals['_EXPORTREQUEST']._serialized_start=1853 + _globals['_EXPORTREQUEST']._serialized_end=1904 + _globals['_LISTREQUEST']._serialized_start=1906 + _globals['_LISTREQUEST']._serialized_end=1934 + _globals['_ASKREQUEST']._serialized_start=1937 + _globals['_ASKREQUEST']._serialized_end=2231 + _globals['_AIRESPONSE']._serialized_start=2234 + _globals['_AIRESPONSE']._serialized_end=2434 + _globals['_BOOLRESPONSE']._serialized_start=2436 + _globals['_BOOLRESPONSE']._serialized_end=2465 + _globals['_PROVIDERREQUEST']._serialized_start=2467 + _globals['_PROVIDERREQUEST']._serialized_end=2534 + _globals['_INTREQUEST']._serialized_start=2536 + _globals['_INTREQUEST']._serialized_end=2563 
+ _globals['_NODERUNRESULT']._serialized_start=2565 + _globals['_NODERUNRESULT']._serialized_end=2677 + _globals['_FULLREPLACEREQUEST']._serialized_start=2679 + _globals['_FULLREPLACEREQUEST']._serialized_end=2788 + _globals['_COPILOTREQUEST']._serialized_start=2790 + _globals['_COPILOTREQUEST']._serialized_end=2878 + _globals['_COPILOTRESPONSE']._serialized_start=2880 + _globals['_COPILOTRESPONSE']._serialized_end=2965 + _globals['_MCPREQUEST']._serialized_start=2967 + _globals['_MCPREQUEST']._serialized_end=3064 + _globals['_NODESERVICE']._serialized_start=3067 + _globals['_NODESERVICE']._serialized_end=4060 + _globals['_PROFILESERVICE']._serialized_start=4063 + _globals['_PROFILESERVICE']._serialized_end=4469 + _globals['_CONFIGSERVICE']._serialized_start=4472 + _globals['_CONFIGSERVICE']._serialized_end=4902 + _globals['_PLUGINSERVICE']._serialized_start=4905 + _globals['_PLUGINSERVICE']._serialized_end=5235 + _globals['_EXECUTIONSERVICE']._serialized_start=5238 + _globals['_EXECUTIONSERVICE']._serialized_end=5521 + _globals['_IMPORTEXPORTSERVICE']._serialized_start=5524 + _globals['_IMPORTEXPORTSERVICE']._serialized_end=5750 + _globals['_AISERVICE']._serialized_start=5753 + _globals['_AISERVICE']._serialized_end=6280 + _globals['_SYSTEMSERVICE']._serialized_start=6283 + _globals['_SYSTEMSERVICE']._serialized_end=6605 # @@protoc_insertion_point(module_scope) diff --git a/connpy/grpc_layer/server.py b/connpy/grpc_layer/server.py index 0761943..e053f95 100644 --- a/connpy/grpc_layer/server.py +++ b/connpy/grpc_layer/server.py @@ -223,158 +223,138 @@ class NodeServicer(connpy_pb2_grpc.NodeServiceServicer): copilot_node_info_json=node_info_json )) - # 2. 
Await the question from client via the copilot_queue - import threading - def preload_ai_deps(): - try: - import litellm - except Exception: - pass - threading.Thread(target=preload_ai_deps, daemon=True).start() - - try: - req_data = await asyncio.wait_for(remote_stream.copilot_queue.get(), timeout=120) - if "question" not in req_data or not req_data["question"] or req_data["question"] == "CANCEL": - os.write(child_fd, b'\x15\r') - return - question = req_data["question"] - - context_buffer = req_data.get("context_buffer", "") - if context_buffer.startswith('{"context_start_pos"'): + while True: + # 2. Await the question from client via the copilot_queue + import threading + def preload_ai_deps(): try: - parsed = json.loads(context_buffer) - start_pos = parsed["context_start_pos"] - selected_raw = raw_bytes[start_pos:] - context_buffer = n._logclean(selected_raw.decode(errors='replace'), var=True) + import litellm except Exception: - context_buffer = buffer - elif not context_buffer: - context_buffer = buffer - except asyncio.TimeoutError: - os.write(child_fd, b'\x15\r') - return + pass + threading.Thread(target=preload_ai_deps, daemon=True).start() - # 3. 
Call AI Service with streaming - from ..services.ai_service import AIService - service = AIService(self.service.config) - - def chunk_callback(chunk_text): - if chunk_text: - response_queue.put(connpy_pb2.InteractResponse( - copilot_stream_chunk=chunk_text - )) - - # Create a clean version of node_info for the AI to save tokens and match local CLI behavior - ai_node_info = {k: v for k, v in node_info.items() if k not in ("context_blocks", "full_buffer")} - - ai_task = asyncio.create_task(service.aask_copilot(context_buffer, question, ai_node_info, chunk_callback=chunk_callback)) - wait_action_task = asyncio.create_task(remote_stream.copilot_queue.get()) - - done, pending = await asyncio.wait( - [ai_task, wait_action_task], - return_when=asyncio.FIRST_COMPLETED - ) - - if wait_action_task in done: - req_data = wait_action_task.result() - ai_task.cancel() - if req_data.get("question") == "CANCEL" or req_data.get("action") == "cancel": - os.write(child_fd, b'\x15\r') - return - return - else: - wait_action_task.cancel() - result = ai_task.result() - if not result: - os.write(child_fd, b'\x15\r') - return - - # 4. Send response back to client - response_queue.put(connpy_pb2.InteractResponse( - copilot_response_json=json.dumps(result) - )) - - # 5. 
Wait for user action - try: - action_data = await asyncio.wait_for(remote_stream.copilot_queue.get(), timeout=60) - if "action" not in action_data or not action_data["action"] or action_data["action"] == "cancel": - os.write(child_fd, b'\x15\r') - return - action = action_data["action"] - except asyncio.TimeoutError: - os.write(child_fd, b'\x15\r') - return - - if action == "send_all": - commands = result.get("commands", []) - os.write(child_fd, b'\x15') # Ctrl+U to clear line - await asyncio.sleep(0.1) - - # Prepend screen length command to avoid pagination - if "screen_length_command" in n.tags: - os.write(child_fd, (n.tags["screen_length_command"] + "\n").encode()) - response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=n.tags["screen_length_command"])) - await asyncio.sleep(0.8) - - for cmd in commands: - os.write(child_fd, (cmd + "\n").encode()) - response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=cmd)) - await asyncio.sleep(0.8) - elif action.startswith("custom:"): - custom_cmds = action[7:] - os.write(child_fd, b'\x15') - await asyncio.sleep(0.1) - - # Prepend screen length command to avoid pagination - if "screen_length_command" in n.tags: - os.write(child_fd, (n.tags["screen_length_command"] + "\n").encode()) - response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=n.tags["screen_length_command"])) - await asyncio.sleep(0.8) - - for cmd in custom_cmds.split('\n'): - if cmd.strip(): - os.write(child_fd, (cmd.strip() + "\n").encode()) - response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=cmd.strip())) - await asyncio.sleep(0.8) - elif action not in ('cancel', 'n', 'no'): - # Handle numbers and ranges like "1,2,4-6" try: - commands = result.get("commands", []) - selected_indices = set() - for part in action.split(','): - part = part.strip() - if not part: continue - if '-' in part: - start_str, end_str = part.split('-', 1) - start = int(start_str) - 1 - end = int(end_str) - 1 - for i 
in range(start, end + 1): - selected_indices.add(i) - else: - selected_indices.add(int(part) - 1) - - valid_indices = sorted([i for i in selected_indices if 0 <= i < len(commands)]) - if valid_indices: - os.write(child_fd, b'\x15') - await asyncio.sleep(0.1) - - # Prepend screen length command to avoid pagination - if "screen_length_command" in n.tags: - os.write(child_fd, (n.tags["screen_length_command"] + "\n").encode()) - response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=n.tags["screen_length_command"])) - await asyncio.sleep(0.8) - - for idx in valid_indices: - os.write(child_fd, (commands[idx] + "\n").encode()) - response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=commands[idx])) - await asyncio.sleep(0.8) - else: + req_data = await asyncio.wait_for(remote_stream.copilot_queue.get(), timeout=120) + if not req_data: return + if "question" not in req_data or not req_data["question"] or req_data["question"] == "CANCEL" or req_data.get("action") == "cancel": os.write(child_fd, b'\x15\r') - except (ValueError, IndexError): + return + question = req_data["question"] + + merged_node_info_str = req_data.get("node_info_json", "") + if merged_node_info_str: + try: + merged_node_info = json.loads(merged_node_info_str) + node_info.update(merged_node_info) + except: pass + + context_buffer = req_data.get("context_buffer", "") + if context_buffer.startswith('{"context_start_pos"'): + try: + parsed = json.loads(context_buffer) + start_pos = parsed["context_start_pos"] + selected_raw = raw_bytes[start_pos:] + context_buffer = n._logclean(selected_raw.decode(errors='replace'), var=True) + except Exception: + context_buffer = buffer + elif not context_buffer: + context_buffer = buffer + except asyncio.TimeoutError: os.write(child_fd, b'\x15\r') - else: - # Cancelled or invalid action - os.write(child_fd, b'\x15\r') + return + + # 3. 
Call AI Service with streaming + from ..services.ai_service import AIService + service = AIService(self.service.config) + + def chunk_callback(chunk_text): + if chunk_text: + response_queue.put(connpy_pb2.InteractResponse( + copilot_stream_chunk=chunk_text + )) + + # Create a clean version of node_info for the AI to save tokens and match local CLI behavior + ai_node_info = {k: v for k, v in node_info.items() if k not in ("context_blocks", "full_buffer")} + + ai_task = asyncio.create_task(service.aask_copilot(context_buffer, question, ai_node_info, chunk_callback=chunk_callback)) + wait_action_task = asyncio.create_task(remote_stream.copilot_queue.get()) + + done, pending = await asyncio.wait( + [ai_task, wait_action_task], + return_when=asyncio.FIRST_COMPLETED + ) + + if wait_action_task in done: + req_data = wait_action_task.result() + ai_task.cancel() + if req_data.get("action") == "cancel" or req_data.get("question") == "CANCEL": + os.write(child_fd, b'\x15\r') + return + continue # Loop back instead of returning to keep session alive + else: + wait_action_task.cancel() + result = ai_task.result() + if not result: + os.write(child_fd, b'\x15\r') + return + + # 4. Send response back to client + response_queue.put(connpy_pb2.InteractResponse( + copilot_response_json=json.dumps(result) + )) + + # 5. 
Wait for user action + try: + action_data = await asyncio.wait_for(remote_stream.copilot_queue.get(), timeout=60) + if not action_data: return + action = action_data.get("action", "cancel") + + if action == "continue": + continue # Loop back for next question + + if action == "cancel": + os.write(child_fd, b'\x15\r') + return + except asyncio.TimeoutError: + os.write(child_fd, b'\x15\r') + return + + if action == "send_all": + commands = result.get("commands", []) + os.write(child_fd, b'\x15') # Ctrl+U to clear line + await asyncio.sleep(0.1) + + # Prepend screen length command to avoid pagination + if "screen_length_command" in n.tags: + os.write(child_fd, (n.tags["screen_length_command"] + "\n").encode()) + response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=n.tags["screen_length_command"])) + await asyncio.sleep(0.8) + + for cmd in commands: + os.write(child_fd, (cmd + "\n").encode()) + response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=cmd)) + await asyncio.sleep(0.8) + return + elif action.startswith("custom:"): + custom_cmds = action[7:] + os.write(child_fd, b'\x15') + await asyncio.sleep(0.1) + + # Prepend screen length command to avoid pagination + if "screen_length_command" in n.tags: + os.write(child_fd, (n.tags["screen_length_command"] + "\n").encode()) + response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=n.tags["screen_length_command"])) + await asyncio.sleep(0.8) + + for cmd in custom_cmds.split('\n'): + if cmd.strip(): + os.write(child_fd, (cmd.strip() + "\n").encode()) + response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=cmd.strip())) + await asyncio.sleep(0.8) + return + else: + os.write(child_fd, b'\x15\r') + return asyncio.run(n._async_interact_loop(remote_stream, resize_callback, copilot_handler=remote_copilot_handler)) except Exception as e: diff --git a/connpy/grpc_layer/stubs.py b/connpy/grpc_layer/stubs.py index 2db457f..5250c5e 100644 --- 
a/connpy/grpc_layer/stubs.py +++ b/connpy/grpc_layer/stubs.py @@ -51,16 +51,22 @@ class NodeStub: pause_generator() termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) - interface = CopilotInterface(self.config, history=getattr(self, 'copilot_history', None)) + interface = CopilotInterface( + self.config, + history=getattr(self, 'copilot_history', None), + session_state=getattr(self, 'copilot_state', None) + ) self.copilot_history = interface.history + self.copilot_state = interface.session_state node_info = json.loads(res.copilot_node_info_json) if res.copilot_node_info_json else {} - async def on_ai_call_remote(active_buffer, question, chunk_callback): + async def on_ai_call_remote(active_buffer, question, chunk_callback, merged_node_info): # Send request to server request_queue.put(connpy_pb2.InteractRequest( copilot_question=question, - copilot_context_buffer=active_buffer + copilot_context_buffer=active_buffer, + copilot_node_info_json=json.dumps(merged_node_info) )) # Wait for chunks from server while True: @@ -76,12 +82,20 @@ class NodeStub: # Wrap in async loop async def run_remote_copilot(): - return await interface.run_session( - raw_bytes=bytes(client_buffer_bytes), - cmd_byte_positions=cmd_byte_positions, - node_info=node_info, - on_ai_call=on_ai_call_remote - ) + while True: + action, commands, custom_cmd = await interface.run_session( + raw_bytes=bytes(client_buffer_bytes), + cmd_byte_positions=cmd_byte_positions, + node_info=node_info, + on_ai_call=on_ai_call_remote + ) + + if action == "continue": + # Send continue signal to server to loop back for another question + request_queue.put(connpy_pb2.InteractRequest(copilot_action="continue")) + continue + + return action, commands, custom_cmd with copilot_terminal_mode(): action, commands, custom_cmd = asyncio.run(run_remote_copilot()) diff --git a/connpy/printer.py b/connpy/printer.py index 501b215..a9bfcaf 100644 --- a/connpy/printer.py +++ b/connpy/printer.py @@ -46,8 +46,9 @@ def _get_local(): 
_local.console = None if not hasattr(_local, 'err_console'): _local.err_console = None - if not hasattr(_local, 'theme'): - _local.theme = None + if not hasattr(_local, 'theme') or _local.theme is None: + from rich.theme import Theme + _local.theme = Theme(_global_active_styles) return _local def set_thread_stream(stream): @@ -69,23 +70,45 @@ def get_original_stderr(): # Centralized design system STYLES = { - "info": "cyan", - "warning": "yellow", - "error": "red", - "success": "green", - "debug": "dim", - "header": "bold cyan", - "key": "bold cyan", - "border": "cyan", - "pass": "bold green", - "fail": "bold red", - "engineer": "blue", - "architect": "medium_purple", - "ai_status": "bold green", - "user_prompt": "bold cyan", - "unavailable": "orange3", + "info": "#00ffff", # Cyan + "warning": "#ffff00", # Yellow + "error": "#ff0000", # Red + "success": "#00ff00", # Green + "debug": "#888888", + "header": "bold #00ffff", + "key": "bold #00ffff", + "border": "#00ffff", + "pass": "bold #00ff00", + "fail": "bold #ff0000", + "engineer": "#5fafff", # Sky Blue (lighter than pure blue) + "architect": "#9370db", # Medium Purple + "ai_status": "bold #00ff00", + "user_prompt": "bold #00afd7", # Deep Sky Blue / Soft Cyan + "unavailable": "#d78700", + "contrast": "#bbbbbb", } +LIGHT_THEME = { + "info": "#00008b", # Navy Blue + "warning": "#d78700", # Orange + "error": "#cd0000", # Dark Red + "success": "#006400", # Dark Green + "debug": "#777777", + "header": "bold #00008b", + "key": "bold #00008b", + "border": "#00008b", + "pass": "bold #006400", + "fail": "bold #cd0000", + "engineer": "#00008b", + "architect": "#8b008b", # Dark Magenta + "ai_status": "bold #006400", + "user_prompt": "bold #00008b", + "unavailable": "#666666", + "contrast": "#777777", +} + +_global_active_styles = STYLES.copy() + def _get_console(): local = _get_local() @@ -171,7 +194,7 @@ def connpy_theme(): local = _get_local() if local.theme is None: from rich.theme import Theme - local.theme = 
Theme(STYLES) + local.theme = Theme(_global_active_styles) return local.theme def apply_theme(user_styles=None): @@ -179,6 +202,7 @@ def apply_theme(user_styles=None): Updates the global console themes with user-defined styles. If a style is missing in user_styles, it falls back to the default in STYLES. """ + global _global_active_styles local = _get_local() from rich.theme import Theme @@ -190,6 +214,7 @@ def apply_theme(user_styles=None): if key in active_styles: active_styles[key] = value + _global_active_styles = active_styles local.theme = Theme(active_styles) if local.console: local.console.push_theme(local.theme) @@ -202,10 +227,15 @@ def _format_multiline(tag, message, style=None): message = str(message) lines = message.splitlines() if not lines: - return f"[{style}]\\[{tag}][/{style}]" if style else f"\\[{tag}]" + if style: + return f"[{style}]\\[{tag}][/{style}]" + return f"\\[{tag}]" # Apply style to the tag if provided styled_tag = f"[{style}]\\[{tag}][/{style}]" if style else f"\\[{tag}]" + if style: + # Include brackets in the styling + styled_tag = f"[{style}]\\[{tag}][/{style}]" formatted = [f"{styled_tag} {lines[0]}"] # Indent subsequent lines @@ -462,7 +492,7 @@ class _ThemeProxy: local = _get_local() if local.theme is None: from rich.theme import Theme - local.theme = Theme(STYLES) + local.theme = Theme(_global_active_styles) return getattr(local.theme, name) connpy_theme = _ThemeProxy() diff --git a/connpy/proto/connpy.proto b/connpy/proto/connpy.proto index 2c1a0ec..4ea99e3 100644 --- a/connpy/proto/connpy.proto +++ b/connpy/proto/connpy.proto @@ -95,6 +95,7 @@ message InteractRequest { string copilot_question = 8; string copilot_action = 9; string copilot_context_buffer = 10; + string copilot_node_info_json = 13; } message InteractResponse { diff --git a/connpy/services/ai_service.py b/connpy/services/ai_service.py index 6d2b86e..b9eccd2 100644 --- a/connpy/services/ai_service.py +++ b/connpy/services/ai_service.py @@ -45,6 +45,65 @@ class 
AIService(BaseService): blocks.append((pos, preview[:80])) return blocks + def process_copilot_input(self, input_text: str, session_state: dict) -> dict: + """Parses slash commands and manages session state. Returns directive dict.""" + text = input_text.strip() + if not text.startswith('/'): + return {"action": "execute", "clean_prompt": text, "overrides": {}} + + parts = text.split(maxsplit=1) + cmd = parts[0].lower() + args = parts[1] if len(parts) > 1 else "" + + # 1. State Commands (Persistent) + if cmd == "/os": + if args: + session_state['os'] = args + return {"action": "state_update", "message": f"OS context changed to {args}"} + elif cmd == "/prompt": + if args: + session_state['prompt'] = args + return {"action": "state_update", "message": f"Prompt regex changed to {args}"} + elif cmd == "/memorize": + if args: + session_state['memories'].append(args) + return {"action": "state_update", "message": f"Memory added: {args}"} + elif cmd == "/clear": + session_state['memories'] = [] + return {"action": "state_update", "message": "Memory cleared"} + + # 2. 
Hybrid Commands + elif cmd == "/architect": + if not args: + session_state['persona'] = 'architect' + return {"action": "state_update", "message": "Persona set to Architect"} + else: + return {"action": "execute", "clean_prompt": args, "overrides": {"persona": "architect"}} + + elif cmd == "/engineer": + if not args: + session_state['persona'] = 'engineer' + return {"action": "state_update", "message": "Persona set to Engineer"} + else: + return {"action": "execute", "clean_prompt": args, "overrides": {"persona": "engineer"}} + + elif cmd == "/trust": + if not args: + session_state['trust_mode'] = True + return {"action": "state_update", "message": "Auto-execute (trust) enabled for session"} + else: + return {"action": "execute", "clean_prompt": args, "overrides": {"trust": True}} + + elif cmd == "/untrust": + if not args: + session_state['trust_mode'] = False + return {"action": "state_update", "message": "Auto-execute (trust) disabled for session"} + else: + return {"action": "execute", "clean_prompt": args, "overrides": {"trust": False}} + + # Unknown command, execute normally + return {"action": "execute", "clean_prompt": text, "overrides": {}} + def ask(self, input_text, dryrun=False, chat_history=None, status=None, debug=False, session_id=None, console=None, chunk_callback=None, confirm_handler=None, trust=False, **overrides): """Send a prompt to the AI agent.""" from connpy.ai import ai diff --git a/connpy/services/config_service.py b/connpy/services/config_service.py index 3a63746..5f900bd 100644 --- a/connpy/services/config_service.py +++ b/connpy/services/config_service.py @@ -70,6 +70,10 @@ class ConfigService(BaseService): if not isinstance(user_styles, dict): raise InvalidConfigurationError("Theme file must be a YAML dictionary.") + # Support both direct styles and nested under 'theme' key + if "theme" in user_styles and isinstance(user_styles["theme"], dict): + user_styles = user_styles["theme"] + # Filter for valid styles only (prevent junk in 
config) valid_styles = {k: v for k, v in user_styles.items() if k in STYLES} diff --git a/connpy/tunnels.py b/connpy/tunnels.py index a0ed142..1043738 100644 --- a/connpy/tunnels.py +++ b/connpy/tunnels.py @@ -162,13 +162,19 @@ class RemoteStream: if req.cols > 0 and req.rows > 0: if self.resize_callback: self._loop.call_soon_threadsafe(self.resize_callback, req.rows, req.cols) + # Copilot dispatching + copilot_msg = {} if getattr(req, "copilot_question", ""): - self._loop.call_soon_threadsafe(self.copilot_queue.put_nowait, { + copilot_msg.update({ "question": req.copilot_question, - "context_buffer": getattr(req, "copilot_context_buffer", "") + "context_buffer": getattr(req, "copilot_context_buffer", ""), + "node_info_json": getattr(req, "copilot_node_info_json", "") }) if getattr(req, "copilot_action", ""): - self._loop.call_soon_threadsafe(self.copilot_queue.put_nowait, {"action": req.copilot_action}) + copilot_msg["action"] = req.copilot_action + + if copilot_msg: + self._loop.call_soon_threadsafe(self.copilot_queue.put_nowait, copilot_msg) if req.stdin_data: self._loop.call_soon_threadsafe(self._reader_queue.put_nowait, req.stdin_data) except Exception: diff --git a/docker-compose.yml b/docker-compose.yml index 88a622b..a0744ab 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,9 +1,17 @@ -version: "3.8" services: connpy-app: build: . 
- image: connpy-app + image: connpy:latest + container_name: connpy + # Fundamental para la interactividad de la terminal + stdin_open: true + tty: true + environment: + - TERM=xterm-256color + extra_hosts: + - "host.docker.internal:host-gateway" volumes: - - ./docker/connpy/:/app - - ./docker/logs/:/logs - - ./docker/ssh/:/root/.ssh/ + - ./docker/config:/config + - ./docker/ssh:/root/.ssh + - /var/run/docker.sock:/var/run/docker.sock + # No definimos comando por defecto para que 'run' sea mΓ‘s natural diff --git a/docker/connpy/.gitignore b/docker/config/.gitignore similarity index 100% rename from docker/connpy/.gitignore rename to docker/config/.gitignore diff --git a/dockerfile b/dockerfile index a6baeae..fc4f68b 100644 --- a/dockerfile +++ b/dockerfile @@ -1,21 +1,65 @@ -# Use the official python image +# connpy v6.0.0b8 - Modern Network Automation Environment (Local Build) +FROM python:3.11-slim -FROM python:3.11-alpine as connpy-app +LABEL description="Connpy: AI-Driven Network Automation & Intelligence Platform" + +# ConfiguraciΓ³n de Terminal y Python +ENV DEBIAN_FRONTEND=noninteractive \ + PYTHONUNBUFFERED=1 \ + TERM=xterm-256color -# Set the entrypoint -# Set the working directory WORKDIR /app -# Install any additional dependencies -RUN apk update && apk add --no-cache openssh fzf fzf-tmux ncurses bash -RUN pip3 install connpy -RUN connpy config --configfolder /app +# 1. Herramientas base del sistema +RUN apt-get update && apt-get install -y --no-install-recommends \ + curl \ + git \ + openssh-client \ + fzf \ + ncurses-bin \ + bash \ + procps \ + unzip \ + ca-certificates \ + gnupg \ + iputils-ping \ + telnet \ + && rm -rf /var/lib/apt/lists/* -#AUTH -RUN ssh-keygen -A -RUN mkdir /root/.ssh && \ - chmod 700 /root/.ssh +# 2. 
Instalar Docker CLI (para el plugin de docker de connpy) +RUN install -m 0755 -d /etc/apt/keyrings && \ + curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg && \ + echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ + tee /etc/apt/sources.list.d/docker.list > /dev/null && \ + apt-get update && apt-get install -y docker-ce-cli && \ + rm -rf /var/lib/apt/lists/* +# 3. Instalar Kubectl (para el plugin de k8s de connpy) +RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/$(dpkg --print-architecture)/kubectl" && \ + install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl && \ + rm kubectl -#Set the entrypoint -ENTRYPOINT ["connpy"] +# 4. Instalar AWS CLI y Session Manager Plugin (Universal x86_64/ARM64) +RUN ARCH=$(uname -m) && \ + if [ "$ARCH" = "x86_64" ]; then AWS_ARCH="x86_64"; else AWS_ARCH="aarch64"; fi && \ + curl "https://awscli.amazonaws.com/awscli-exe-linux-$AWS_ARCH.zip" -o "awscliv2.zip" && \ + unzip awscliv2.zip && ./aws/install && rm -rf awscliv2.zip aws/ && \ + if [ "$ARCH" = "x86_64" ]; then \ + curl "https://s3.amazonaws.com/session-manager-downloads/plugin/latest/ubuntu_64bit/session-manager-plugin.deb" -o "ssm.deb"; \ + else \ + curl "https://s3.amazonaws.com/session-manager-downloads/plugin/latest/ubuntu_arm64/session-manager-plugin.deb" -o "ssm.deb"; \ + fi && \ + dpkg -i ssm.deb && rm ssm.deb + +# 5. Copiar cΓ³digo local e instalar dependencias +COPY . . +RUN pip install --no-cache-dir --upgrade pip && \ + pip install --no-cache-dir . + +# 6. 
ConfiguraciΓ³n de persistencia +# Creamos la carpeta y el puntero .folder para que connpy use /config +RUN mkdir -p /config /root/.ssh /root/.config/conn && chmod 700 /root/.ssh && \ + echo -n "/config" > /root/.config/conn/.folder + +# Punto de entrada directo a connpy +ENTRYPOINT ["conn"] diff --git a/docs/connpy/cli/ai_handler.html b/docs/connpy/cli/ai_handler.html index 1756f97..3f88ec2 100644 --- a/docs/connpy/cli/ai_handler.html +++ b/docs/connpy/cli/ai_handler.html @@ -3,7 +3,7 @@ - + connpy.cli.ai_handler API documentation @@ -77,6 +77,9 @@ el.replaceWith(d); except Exception as e: printer.error(str(e)) return + + if args.mcp is not None: + return self.configure_mcp(args) # Determinar session_id para retomar session_id = None @@ -156,7 +159,7 @@ el.replaceWith(d); try: user_query = Prompt.ask("[user_prompt]User[/user_prompt]") if not user_query.strip(): continue - if user_query.lower() in ['exit', 'quit', 'bye']: break + if user_query.lower() in ['exit', 'quit', 'bye', 'cancel']: break with console.status("[ai_status]Agent is thinking...") as status: result = self.app.myai.ask(user_query, chat_history=history, status=status, debug=args.debug, trust=args.trust, **self.ai_overrides) @@ -179,11 +182,245 @@ el.replaceWith(d); console.print(f"[debug]Tokens: {u['total']} (Input: {u['input']}, Output: {u['output']})[/debug]") except (KeyboardInterrupt, EOFError): console.print("\n[dim]Session closed.[/dim]") - break + break + + def configure_mcp(self, args): + """Handle MCP server configuration via CLI tokens or interactive wizard.""" + mcp_args = args.mcp + + # 1. 
Non-interactive CLI Mode (if arguments are provided) + if mcp_args: + action = mcp_args[0].lower() + + if action == "list": + settings = self.app.services.config_svc.get_settings() + mcp_servers = settings.get("ai", {}).get("mcp_servers", {}) + if not mcp_servers: + printer.info("No MCP servers configured.") + else: + columns = ["Name", "URL", "Enabled", "Auto-load OS"] + rows = [] + for name, cfg in mcp_servers.items(): + rows.append([ + name, + cfg.get("url", ""), + "[green]Yes[/green]" if cfg.get("enabled", True) else "[red]No[/red]", + cfg.get("auto_load_on_os", "Any") + ]) + printer.table("Configured MCP Servers", columns, rows) + return + + elif action == "add": + if len(mcp_args) < 3: + printer.error("Usage: connpy ai --mcp add <name> <url> [os_filter]") + return + name, url = mcp_args[1], mcp_args[2] + os_filter = mcp_args[3] if len(mcp_args) > 3 else None + try: + self.app.services.ai.configure_mcp(name, url=url, auto_load_on_os=os_filter) + printer.success(f"MCP server '{name}' added/updated.") + except Exception as e: + printer.error(str(e)) + return + + elif action == "remove": + if len(mcp_args) < 2: + printer.error("Usage: connpy ai --mcp remove <name>") + return + name = mcp_args[1] + try: + self.app.services.ai.configure_mcp(name, remove=True) + printer.success(f"MCP server '{name}' removed.") + except Exception as e: + printer.error(str(e)) + return + + elif action in ["enable", "disable"]: + if len(mcp_args) < 2: + printer.error(f"Usage: connpy ai --mcp {action} <name>") + return + name = mcp_args[1] + enabled = (action == "enable") + try: + self.app.services.ai.configure_mcp(name, enabled=enabled) + printer.success(f"MCP server '{name}' {'enabled' if enabled else 'disabled'}.") + except Exception as e: + printer.error(str(e)) + return + + else: + printer.error(f"Unknown MCP action: {action}") + printer.info("Available actions: list, add, remove, enable, disable") + return + + # 2. 
Interactive Wizard Mode (if no arguments provided) + # Import forms dynamically to avoid circular dependencies if any + if not hasattr(self.app, "cli_forms"): + from .forms import Forms + self.app.cli_forms = Forms(self.app) + + settings = self.app.services.config_svc.get_settings() + mcp_servers = settings.get("ai", {}).get("mcp_servers", {}) + + result = self.app.cli_forms.mcp_wizard(mcp_servers) + if not result: + return + + action = result["action"] + try: + if action == "list": + # Recursive call to the non-interactive list logic + args.mcp = ["list"] + return self.configure_mcp(args) + + elif action == "add": + self.app.services.ai.configure_mcp( + result["name"], + url=result["url"], + enabled=result["enabled"], + auto_load_on_os=result["os"] + ) + printer.success(f"MCP server '{result['name']}' saved.") + + elif action == "update": # Used for toggle + self.app.services.ai.configure_mcp( + result["name"], + enabled=result["enabled"] + ) + printer.success(f"MCP server '{result['name']}' updated.") + + elif action == "remove": + self.app.services.ai.configure_mcp(result["name"], remove=True) + printer.success(f"MCP server '{result['name']}' removed.") + + except Exception as e: + printer.error(str(e))

Methods

+
+def configure_mcp(self, args) +
+
+
+ +Expand source code + +
def configure_mcp(self, args):
+    """Handle MCP server configuration via CLI tokens or interactive wizard."""
+    mcp_args = args.mcp
+    
+    # 1. Non-interactive CLI Mode (if arguments are provided)
+    if mcp_args:
+        action = mcp_args[0].lower()
+        
+        if action == "list":
+            settings = self.app.services.config_svc.get_settings()
+            mcp_servers = settings.get("ai", {}).get("mcp_servers", {})
+            if not mcp_servers:
+                printer.info("No MCP servers configured.")
+            else:
+                columns = ["Name", "URL", "Enabled", "Auto-load OS"]
+                rows = []
+                for name, cfg in mcp_servers.items():
+                    rows.append([
+                        name, 
+                        cfg.get("url", ""), 
+                        "[green]Yes[/green]" if cfg.get("enabled", True) else "[red]No[/red]",
+                        cfg.get("auto_load_on_os", "Any")
+                    ])
+                printer.table("Configured MCP Servers", columns, rows)
+            return
+
+        elif action == "add":
+            if len(mcp_args) < 3:
+                printer.error("Usage: connpy ai --mcp add <name> <url> [os_filter]")
+                return
+            name, url = mcp_args[1], mcp_args[2]
+            os_filter = mcp_args[3] if len(mcp_args) > 3 else None
+            try:
+                self.app.services.ai.configure_mcp(name, url=url, auto_load_on_os=os_filter)
+                printer.success(f"MCP server '{name}' added/updated.")
+            except Exception as e:
+                printer.error(str(e))
+            return
+
+        elif action == "remove":
+            if len(mcp_args) < 2:
+                printer.error("Usage: connpy ai --mcp remove <name>")
+                return
+            name = mcp_args[1]
+            try:
+                self.app.services.ai.configure_mcp(name, remove=True)
+                printer.success(f"MCP server '{name}' removed.")
+            except Exception as e:
+                printer.error(str(e))
+            return
+
+        elif action in ["enable", "disable"]:
+            if len(mcp_args) < 2:
+                printer.error(f"Usage: connpy ai --mcp {action} <name>")
+                return
+            name = mcp_args[1]
+            enabled = (action == "enable")
+            try:
+                self.app.services.ai.configure_mcp(name, enabled=enabled)
+                printer.success(f"MCP server '{name}' {'enabled' if enabled else 'disabled'}.")
+            except Exception as e:
+                printer.error(str(e))
+            return
+        
+        else:
+            printer.error(f"Unknown MCP action: {action}")
+            printer.info("Available actions: list, add, remove, enable, disable")
+            return
+
+    # 2. Interactive Wizard Mode (if no arguments provided)
+    # Import forms dynamically to avoid circular dependencies if any
+    if not hasattr(self.app, "cli_forms"):
+        from .forms import Forms
+        self.app.cli_forms = Forms(self.app)
+        
+    settings = self.app.services.config_svc.get_settings()
+    mcp_servers = settings.get("ai", {}).get("mcp_servers", {})
+    
+    result = self.app.cli_forms.mcp_wizard(mcp_servers)
+    if not result:
+        return
+
+    action = result["action"]
+    try:
+        if action == "list":
+            # Recursive call to the non-interactive list logic
+            args.mcp = ["list"]
+            return self.configure_mcp(args)
+        
+        elif action == "add":
+            self.app.services.ai.configure_mcp(
+                result["name"], 
+                url=result["url"], 
+                enabled=result["enabled"],
+                auto_load_on_os=result["os"]
+            )
+            printer.success(f"MCP server '{result['name']}' saved.")
+        
+        elif action == "update": # Used for toggle
+            self.app.services.ai.configure_mcp(
+                result["name"], 
+                enabled=result["enabled"]
+            )
+            printer.success(f"MCP server '{result['name']}' updated.")
+            
+        elif action == "remove":
+            self.app.services.ai.configure_mcp(result["name"], remove=True)
+            printer.success(f"MCP server '{result['name']}' removed.")
+            
+    except Exception as e:
+        printer.error(str(e))
+
+

Handle MCP server configuration via CLI tokens or interactive wizard.

+
def dispatch(self, args)
@@ -210,6 +447,9 @@ el.replaceWith(d); except Exception as e: printer.error(str(e)) return + + if args.mcp is not None: + return self.configure_mcp(args) # Determinar session_id para retomar session_id = None @@ -283,7 +523,7 @@ el.replaceWith(d); try: user_query = Prompt.ask("[user_prompt]User[/user_prompt]") if not user_query.strip(): continue - if user_query.lower() in ['exit', 'quit', 'bye']: break + if user_query.lower() in ['exit', 'quit', 'bye', 'cancel']: break with console.status("[ai_status]Agent is thinking...") as status: result = self.app.myai.ask(user_query, chat_history=history, status=status, debug=args.debug, trust=args.trust, **self.ai_overrides) @@ -356,6 +596,7 @@ el.replaceWith(d);
  • AIHandler

      +
    • configure_mcp
    • dispatch
    • interactive_chat
    • single_question
    • @@ -367,7 +608,7 @@ el.replaceWith(d); diff --git a/docs/connpy/cli/api_handler.html b/docs/connpy/cli/api_handler.html index 29eb836..1263f6e 100644 --- a/docs/connpy/cli/api_handler.html +++ b/docs/connpy/cli/api_handler.html @@ -3,7 +3,7 @@ - + connpy.cli.api_handler API documentation @@ -193,7 +193,7 @@ el.replaceWith(d); diff --git a/docs/connpy/cli/config_handler.html b/docs/connpy/cli/config_handler.html index 667773d..a95351c 100644 --- a/docs/connpy/cli/config_handler.html +++ b/docs/connpy/cli/config_handler.html @@ -3,7 +3,7 @@ - + connpy.cli.config_handler API documentation @@ -482,7 +482,7 @@ el.replaceWith(d); diff --git a/docs/connpy/cli/context_handler.html b/docs/connpy/cli/context_handler.html index 11e2a86..a6b3dfb 100644 --- a/docs/connpy/cli/context_handler.html +++ b/docs/connpy/cli/context_handler.html @@ -3,7 +3,7 @@ - + connpy.cli.context_handler API documentation @@ -249,7 +249,7 @@ el.replaceWith(d); diff --git a/docs/connpy/cli/forms.html b/docs/connpy/cli/forms.html index b4290fa..da27067 100644 --- a/docs/connpy/cli/forms.html +++ b/docs/connpy/cli/forms.html @@ -3,7 +3,7 @@ - + connpy.cli.forms API documentation @@ -249,11 +249,183 @@ el.replaceWith(d); if "tags" in answer and not answer["tags"].startswith("@") and answer["tags"]: answer["tags"] = ast.literal_eval(answer["tags"]) - return answer + return answer + + def mcp_wizard(self, mcp_servers): + """Interactive wizard to manage MCP servers.""" + from .helpers import theme + + while True: + options = [ + ("List Configured Servers", "list"), + ("Add/Update Server", "add"), + ("Enable/Disable Server", "toggle"), + ("Remove Server", "remove"), + ("Back", "exit") + ] + + questions = [ + inquirer.List("action", message="MCP Configuration", choices=options) + ] + + answers = inquirer.prompt(questions, theme=theme) + if not answers or answers["action"] == "exit": + return None + + action = answers["action"] + + if action == "list": + if not mcp_servers: + print("\nNo MCP servers 
configured.\n") + else: + return {"action": "list"} + + elif action == "add": + questions = [ + inquirer.Text("name", message="Server Name (identifier)"), + inquirer.Text("url", message="SSE URL (e.g., http://localhost:8000/sse)"), + inquirer.Confirm("enabled", message="Enabled?", default=True), + inquirer.Text("auto_load_os", message="Auto-load on specific OS (blank for any)") + ] + answers = inquirer.prompt(questions, theme=theme) + if answers: + return { + "action": "add", + "name": answers["name"], + "url": answers["url"], + "enabled": answers["enabled"], + "os": answers["auto_load_os"] + } + + elif action == "toggle": + if not mcp_servers: + print("\nNo servers to toggle.\n") + continue + + choices = [] + for name, cfg in mcp_servers.items(): + status = "[Enabled]" if cfg.get("enabled", True) else "[Disabled]" + choices.append((f"{name} {status}", name)) + + questions = [ + inquirer.List("name", message="Select server to toggle", choices=choices + [("Cancel", None)]) + ] + answers = inquirer.prompt(questions, theme=theme) + if answers and answers["name"]: + current = mcp_servers[answers["name"]].get("enabled", True) + return { + "action": "update", + "name": answers["name"], + "enabled": not current + } + + elif action == "remove": + if not mcp_servers: + print("\nNo servers to remove.\n") + continue + + questions = [ + inquirer.List("name", message="Select server to remove", choices=list(mcp_servers.keys()) + ["Cancel"]) + ] + answers = inquirer.prompt(questions, theme=theme) + if answers and answers["name"] != "Cancel": + return {"action": "remove", "name": answers["name"]} + return None

      Methods

      +
      +def mcp_wizard(self, mcp_servers) +
      +
      +
      + +Expand source code + +
      def mcp_wizard(self, mcp_servers):
      +    """Interactive wizard to manage MCP servers."""
      +    from .helpers import theme
      +    
      +    while True:
      +        options = [
      +            ("List Configured Servers", "list"),
      +            ("Add/Update Server", "add"),
      +            ("Enable/Disable Server", "toggle"),
      +            ("Remove Server", "remove"),
      +            ("Back", "exit")
      +        ]
      +        
      +        questions = [
      +            inquirer.List("action", message="MCP Configuration", choices=options)
      +        ]
      +        
      +        answers = inquirer.prompt(questions, theme=theme)
      +        if not answers or answers["action"] == "exit":
      +            return None
      +            
      +        action = answers["action"]
      +        
      +        if action == "list":
      +            if not mcp_servers:
      +                print("\nNo MCP servers configured.\n")
      +            else:
      +                return {"action": "list"}
      +        
      +        elif action == "add":
      +            questions = [
      +                inquirer.Text("name", message="Server Name (identifier)"),
      +                inquirer.Text("url", message="SSE URL (e.g., http://localhost:8000/sse)"),
      +                inquirer.Confirm("enabled", message="Enabled?", default=True),
      +                inquirer.Text("auto_load_os", message="Auto-load on specific OS (blank for any)")
      +            ]
      +            answers = inquirer.prompt(questions, theme=theme)
      +            if answers:
      +                return {
      +                    "action": "add",
      +                    "name": answers["name"],
      +                    "url": answers["url"],
      +                    "enabled": answers["enabled"],
      +                    "os": answers["auto_load_os"]
      +                }
      +        
      +        elif action == "toggle":
      +            if not mcp_servers:
      +                print("\nNo servers to toggle.\n")
      +                continue
      +            
      +            choices = []
      +            for name, cfg in mcp_servers.items():
      +                status = "[Enabled]" if cfg.get("enabled", True) else "[Disabled]"
      +                choices.append((f"{name} {status}", name))
      +            
      +            questions = [
      +                inquirer.List("name", message="Select server to toggle", choices=choices + [("Cancel", None)])
      +            ]
      +            answers = inquirer.prompt(questions, theme=theme)
      +            if answers and answers["name"]:
      +                current = mcp_servers[answers["name"]].get("enabled", True)
      +                return {
      +                    "action": "update",
      +                    "name": answers["name"],
      +                    "enabled": not current
      +                }
      +        
      +        elif action == "remove":
      +            if not mcp_servers:
      +                print("\nNo servers to remove.\n")
      +                continue
      +                
      +            questions = [
      +                inquirer.List("name", message="Select server to remove", choices=list(mcp_servers.keys()) + ["Cancel"])
      +            ]
      +            answers = inquirer.prompt(questions, theme=theme)
      +            if answers and answers["name"] != "Cancel":
      +                return {"action": "remove", "name": answers["name"]}
      +    return None
      +
      +

      Interactive wizard to manage MCP servers.

      +
      def questions_bulk(self, nodes='', hosts='')
      @@ -505,6 +677,7 @@ el.replaceWith(d);
    • Forms

        +
      • mcp_wizard
      • questions_bulk
      • questions_edit
      • questions_nodes
      • @@ -517,7 +690,7 @@ el.replaceWith(d); diff --git a/docs/connpy/cli/help_text.html b/docs/connpy/cli/help_text.html index 1440196..1fe8afd 100644 --- a/docs/connpy/cli/help_text.html +++ b/docs/connpy/cli/help_text.html @@ -3,7 +3,7 @@ - + connpy.cli.help_text API documentation @@ -303,7 +303,7 @@ tasks: diff --git a/docs/connpy/cli/helpers.html b/docs/connpy/cli/helpers.html index 052dd92..c0a11ca 100644 --- a/docs/connpy/cli/helpers.html +++ b/docs/connpy/cli/helpers.html @@ -3,7 +3,7 @@ - + connpy.cli.helpers API documentation @@ -207,7 +207,7 @@ el.replaceWith(d); diff --git a/docs/connpy/cli/import_export_handler.html b/docs/connpy/cli/import_export_handler.html index cbb12ec..6f6aa1b 100644 --- a/docs/connpy/cli/import_export_handler.html +++ b/docs/connpy/cli/import_export_handler.html @@ -3,7 +3,7 @@ - + connpy.cli.import_export_handler API documentation @@ -272,7 +272,7 @@ el.replaceWith(d); diff --git a/docs/connpy/cli/index.html b/docs/connpy/cli/index.html index 2bf030c..d22bb1e 100644 --- a/docs/connpy/cli/index.html +++ b/docs/connpy/cli/index.html @@ -3,7 +3,7 @@ - + connpy.cli API documentation @@ -92,6 +92,10 @@ el.replaceWith(d);
        +
        connpy.cli.terminal_ui
        +
        +
        +
        connpy.cli.validators
        @@ -130,6 +134,7 @@ el.replaceWith(d);
      • connpy.cli.profile_handler
      • connpy.cli.run_handler
      • connpy.cli.sync_handler
      • +
      • connpy.cli.terminal_ui
      • connpy.cli.validators
    • @@ -137,7 +142,7 @@ el.replaceWith(d); diff --git a/docs/connpy/cli/node_handler.html b/docs/connpy/cli/node_handler.html index 9123836..992ba84 100644 --- a/docs/connpy/cli/node_handler.html +++ b/docs/connpy/cli/node_handler.html @@ -3,7 +3,7 @@ - + connpy.cli.node_handler API documentation @@ -606,7 +606,7 @@ el.replaceWith(d); diff --git a/docs/connpy/cli/plugin_handler.html b/docs/connpy/cli/plugin_handler.html index 318bde6..17142e7 100644 --- a/docs/connpy/cli/plugin_handler.html +++ b/docs/connpy/cli/plugin_handler.html @@ -3,7 +3,7 @@ - + connpy.cli.plugin_handler API documentation @@ -385,7 +385,7 @@ el.replaceWith(d); diff --git a/docs/connpy/cli/profile_handler.html b/docs/connpy/cli/profile_handler.html index 67daeaf..0d6680f 100644 --- a/docs/connpy/cli/profile_handler.html +++ b/docs/connpy/cli/profile_handler.html @@ -3,7 +3,7 @@ - + connpy.cli.profile_handler API documentation @@ -314,7 +314,7 @@ el.replaceWith(d); diff --git a/docs/connpy/cli/run_handler.html b/docs/connpy/cli/run_handler.html index 7213d59..8f6048c 100644 --- a/docs/connpy/cli/run_handler.html +++ b/docs/connpy/cli/run_handler.html @@ -3,7 +3,7 @@ - + connpy.cli.run_handler API documentation @@ -169,7 +169,7 @@ el.replaceWith(d); commands=commands, variables=variables, parallel=options.get("parallel", 10), - timeout=options.get("timeout", 10), + timeout=options.get("timeout", 20), folder=folder, prompt=prompt, on_node_complete=_on_run_complete @@ -203,7 +203,7 @@ el.replaceWith(d); expected=expected, variables=variables, parallel=options.get("parallel", 10), - timeout=options.get("timeout", 10), + timeout=options.get("timeout", 20), folder=folder, prompt=prompt, on_node_complete=_on_test_complete @@ -260,7 +260,7 @@ el.replaceWith(d); commands=commands, variables=variables, parallel=options.get("parallel", 10), - timeout=options.get("timeout", 10), + timeout=options.get("timeout", 20), folder=folder, prompt=prompt, on_node_complete=_on_run_complete @@ -294,7 +294,7 @@ 
el.replaceWith(d); expected=expected, variables=variables, parallel=options.get("parallel", 10), - timeout=options.get("timeout", 10), + timeout=options.get("timeout", 20), folder=folder, prompt=prompt, on_node_complete=_on_test_complete @@ -454,7 +454,7 @@ el.replaceWith(d); diff --git a/docs/connpy/cli/sync_handler.html b/docs/connpy/cli/sync_handler.html index 4c751be..4ddd115 100644 --- a/docs/connpy/cli/sync_handler.html +++ b/docs/connpy/cli/sync_handler.html @@ -3,7 +3,7 @@ - + connpy.cli.sync_handler API documentation @@ -427,7 +427,7 @@ el.replaceWith(d); diff --git a/docs/connpy/cli/terminal_ui.html b/docs/connpy/cli/terminal_ui.html new file mode 100644 index 0000000..cebfcc5 --- /dev/null +++ b/docs/connpy/cli/terminal_ui.html @@ -0,0 +1,899 @@ + + + + + + +connpy.cli.terminal_ui API documentation + + + + + + + + + + + +
      +
      +
      +

      Module connpy.cli.terminal_ui

      +
      +
      +
      +
      +
      +
      +
      +
      +
      +
      +

      Classes

      +
      +
      +class CopilotInterface +(config,
      history=None,
      pt_input=None,
      pt_output=None,
      rich_file=None,
      session_state=None)
      +
      +
      +
      + +Expand source code + +
      class CopilotInterface:
      +    def __init__(self, config, history=None, pt_input=None, pt_output=None, rich_file=None, session_state=None):
      +        self.config = config
      +        self.history = history or InMemoryHistory()
      +        self.pt_input = pt_input
      +        self.pt_output = pt_output
      +        self.ai_service = AIService(config)
      +        self.session_state = session_state if session_state is not None else {
      +            'persona': 'engineer',
      +            'trust_mode': False,
      +            'memories': [],
      +            'os': None,
      +            'prompt': None
      +        }
      +
      +        if rich_file:
      +            self.console = Console(theme=connpy_theme, force_terminal=True, file=rich_file)
      +        else:
      +            self.console = Console(theme=connpy_theme)
      +
      +        self.mode_range, self.mode_single, self.mode_lines = 0, 1, 2 
      +
      +    def _get_theme_color(self, style_name: str, fallback: str = "white") -> str:
      +        """Extract Hex or ANSI color name from the active rich theme."""
      +        try:
      +            style = connpy_theme.styles.get(style_name)
      +            if style and style.color:
      +                # If it's a standard color like 'green', Rich might return its hex triplet
      +                if style.color.is_default: return fallback
      +                return style.color.triplet.hex if style.color.triplet else style.color.name
      +        except: pass
      +        return fallback
      +
      +    async def run_session(self, 
      +                          raw_bytes: bytes, 
      +                          cmd_byte_positions: List[tuple], 
      +                          node_info: dict,
      +                          on_ai_call: Callable):
      +        """
      +        Runs the interactive Copilot session.
      +        on_ai_call: async function(active_buffer, question) -> result_dict
      +        """
      +        from rich.rule import Rule
      +        
      +        try:
      +            # Prepare UI state
      +            buffer = log_cleaner(raw_bytes.decode(errors='replace'))
      +            blocks = self.ai_service.build_context_blocks(raw_bytes, cmd_byte_positions, node_info)
      +            last_line = buffer.split('\n')[-1].strip() if buffer.strip() else "(prompt)"
      +            blocks.append((len(raw_bytes), last_line[:80]))
      +            
      +            state = {
      +                'context_cmd': 1,
      +                'total_cmds': len(blocks),
      +                'total_lines': len(buffer.split('\n')),
      +                'context_lines': min(50, len(buffer.split('\n'))),
      +                'context_mode': self.mode_range,
      +                'cancelled': False,
      +                'toolbar_msg': '',
      +                'msg_expiry': 0
      +            }
      +            
      +            # 1. Visual Separation
      +            self.console.print("") # Salto de lΓ­nea real
      +            self.console.print(Rule(title="[bold cyan] AI TERMINAL COPILOT [/bold cyan]", style="cyan"))
      +            self.console.print(Panel(
      +                "[dim]Type your question. Enter to send, Escape/Ctrl+C to cancel.\n"
      +                "Tab to change context mode. Ctrl+\u2191/\u2193 to adjust context. \u2191\u2193 for question history.[/dim]",
      +                border_style="cyan"
      +            ))
      +            self.console.print("\n") # PequeΓ±o espacio antes del prompt del copilot
      +
      +            bindings = KeyBindings()
      +            @bindings.add('c-up')
      +            def _(event):
      +                if state['context_mode'] == self.mode_lines:
      +                    state['context_lines'] = min(state['context_lines'] + 50, state['total_lines'])
      +                else:
      +                    state['context_cmd'] = min(state['context_cmd'] + 1, state['total_cmds'])
      +                event.app.invalidate()
      +            @bindings.add('c-down')
      +            def _(event):
      +                if state['context_mode'] == self.mode_lines:
      +                    state['context_lines'] = max(state['context_lines'] - 50, min(50, state['total_lines']))
      +                else:
      +                    state['context_cmd'] = max(state['context_cmd'] - 1, 1)
      +                event.app.invalidate()
      +            @bindings.add('tab')
      +            def _(event):
      +                buf = event.current_buffer
      +                # If typing a slash command (no spaces yet), use tab to autocomplete inline
      +                if buf.text.startswith('/') and ' ' not in buf.text:
      +                    buf.complete_next()
      +                else:
      +                    state['context_mode'] = (state['context_mode'] + 1) % 3
      +                    event.app.invalidate()
      +            @bindings.add('escape', eager=True)
      +            @bindings.add('c-c')
      +            def _(event):
      +                state['cancelled'] = True
      +                event.app.exit(result='')
      +
      +            def get_active_buffer():
      +                if state['context_mode'] == self.mode_lines:
      +                    return '\n'.join(buffer.split('\n')[-state['context_lines']:])
      +                idx = max(0, state['total_cmds'] - state['context_cmd'])
      +                start, preview = blocks[idx]
      +                if state['context_mode'] == self.mode_single and idx + 1 < state['total_cmds']:
      +                    end = blocks[idx + 1][0]
      +                    active_raw = raw_bytes[start:end]
      +                else:
      +                    active_raw = raw_bytes[start:]
      +                return preview + "\n" + log_cleaner(active_raw.decode(errors='replace'))
      +
      +            def get_prompt_text():
      +                import html
      +                # Always use user_prompt color for the Ask prompt
      +                color = self._get_theme_color("user_prompt", "cyan")
      +                
      +                if state['context_mode'] == self.mode_lines:
      +                    text = html.escape(f"Ask [Ctx: {state['context_lines']}/{state['total_lines']}L]: ")
      +                    return HTML(f'<style fg="{color}">{text}</style>')
      +                active = get_active_buffer()
      +                lines_count = len(active.split('\n'))
      +                mode_str = {self.mode_range: "Range", self.mode_single: "Cmd"}[state['context_mode']]
      +                text = html.escape(f"Ask [{mode_str} {state['context_cmd']} ~{lines_count}L]: ")
      +                return HTML(f'<style fg="{color}">{text}</style>')
      +
      +            from prompt_toolkit.application.current import get_app
      +
      +            def get_toolbar():
      +                import html
      +                app = get_app()
      +                c_warning = self._get_theme_color("warning", "yellow")
      +                
      +                if app and app.current_buffer:
      +                    text = app.current_buffer.text
      +                    # Solo mostrar ayuda de comandos si estamos escribiendo el primer comando y no hay espacios
      +                    if text.startswith('/') and ' ' not in text:
      +                        commands = ['/os', '/prompt', '/architect', '/engineer', '/trust', '/untrust', '/memorize', '/clear']
      +                        matches = [c for c in commands if c.startswith(text.lower())]
      +                        if matches:
      +                            m_text = html.escape(f"Available: {' '.join(matches)}")
      +                            return HTML(f'<style fg="{c_warning}">{m_text}</style>' + " " * 20)
      +
      +                m_label = {self.mode_range: "RANGE", self.mode_single: "SINGLE", self.mode_lines: "LINES"}[state['context_mode']]
      +                if state['context_mode'] == self.mode_lines:
      +                    base_str = f'\u25b6 Ctrl+\u2191/\u2193 adjusts by 50 lines  [Tab: {m_label}]'
      +                else:
      +                    idx = max(0, state['total_cmds'] - state['context_cmd'])
      +                    desc = blocks[idx][1]
      +                    base_str = f'\u25b6 {desc}  [Tab: {m_label}]'
      +                
      +                # Wrap base_str in a style to maintain consistency and avoid glitches
      +                # The fg color will be inherited from bottom-toolbar global style if not specified here
      +                base_html = f'<span>{html.escape(base_str)}</span>'
      +                
      +                res_html = base_html
      +                if state.get('toolbar_msg'):
      +                    if time.time() < state.get('msg_expiry', 0):
      +                        msg = html.escape(state['toolbar_msg'])
      +                        res_html = f'<style fg="{c_warning}">βš™οΈ {msg}</style> | ' + base_html
      +                    else:
      +                        state['toolbar_msg'] = ''
      +                
      +                # Pad with spaces to ensure the line is cleared when the message disappears
      +                return HTML(res_html + " " * 20)
      +
      +            from prompt_toolkit.completion import Completer, Completion
      +            class SlashCommandCompleter(Completer):
      +                def get_completions(self, document, complete_event):
      +                    text = document.text_before_cursor
      +                    if text.startswith('/'):
      +                        parts = text.split()
      +                        # Only autocomplete the first word
      +                        if len(parts) <= 1 or (len(parts) == 1 and not text.endswith(' ')):
      +                            cmd_part = parts[0] if parts else text
      +                            commands = [
      +                                ('/os', 'Set device OS (e.g. cisco_ios)'),
      +                                ('/prompt', 'Override prompt regex'),
      +                                ('/architect', 'Switch to Architect persona'),
      +                                ('/engineer', 'Switch to Engineer persona'),
      +                                ('/trust', 'Enable auto-execute'),
      +                                ('/untrust', 'Disable auto-execute'),
      +                                ('/memorize', 'Add fact to memory'),
      +                                ('/clear', 'Clear memory')
      +                            ]
      +                            for cmd, desc in commands:
      +                                if cmd.startswith(cmd_part.lower()):
      +                                    yield Completion(cmd, start_position=-len(cmd_part), display_meta=desc)
      +
      +            copilot_completer = SlashCommandCompleter()
      +
      +            while True:
      +                # 2. Ask question
      +                from prompt_toolkit.styles import Style
      +                c_contrast = self._get_theme_color("contrast", "gray")
      +                ui_style = Style.from_dict({
      +                    'bottom-toolbar': f'fg:{c_contrast}',
      +                })
      +                
      +                session = PromptSession(
      +                    history=self.history, 
      +                    input=self.pt_input, 
      +                    output=self.pt_output,
      +                    completer=copilot_completer,
      +                    reserve_space_for_menu=0,
      +                    style=ui_style
      +                )
      +                try:
      +                    # Usamos un try/finally interno para asegurar que si algo falla en prompt_async,
      +                    # no nos quedemos con la terminal en un estado extraΓ±o.
      +                    question = await session.prompt_async(
      +                        get_prompt_text, 
      +                        key_bindings=bindings, 
      +                        bottom_toolbar=get_toolbar
      +                    )
      +                except (KeyboardInterrupt, EOFError):
      +                    state['cancelled'] = True
      +                    question = ""
      +                
      +                if state['cancelled'] or not question.strip() or question.strip().lower() in ['cancel', 'exit', 'quit']:
      +                    return "cancel", None, None
      +
      +                # 3. Process Input via AIService
      +                directive = self.ai_service.process_copilot_input(question, self.session_state)
      +                
      +                if directive["action"] == "state_update":
      +                    state['toolbar_msg'] = directive['message']
      +                    state['msg_expiry'] = time.time() + 3 # 3 seconds timeout
      +                    
      +                    async def delayed_refresh():
      +                        await asyncio.sleep(3.1)
      +                        # Only invalidate if the message hasn't been replaced by a newer one
      +                        if state.get('toolbar_msg') == directive['message']:
      +                            state['toolbar_msg'] = '' # Explicitly clear
      +                            try:
      +                                from prompt_toolkit.application.current import get_app
      +                                app = get_app()
      +                                if app: app.invalidate()
      +                            except: pass
      +                    asyncio.create_task(delayed_refresh())
      +
      +                    # Mover el cursor arriba y limpiar la lΓ­nea para que el nuevo prompt reemplace al anterior
      +                    sys.stdout.write('\x1b[1A\x1b[2K')
      +                    sys.stdout.flush()
      +                    continue
      +                else:
      +                    # Limpiar el mensaje de la barra cuando se hace una pregunta real
      +                    state['toolbar_msg'] = ''
      +                
      +                clean_question = directive.get("clean_prompt", question)
      +                overrides = directive.get("overrides", {})
      +                
      +                # Merge node_info with session_state and overrides
      +                merged_node_info = node_info.copy()
      +                if self.session_state['os']: merged_node_info['os'] = self.session_state['os']
      +                if self.session_state['prompt']: merged_node_info['prompt'] = self.session_state['prompt']
      +                merged_node_info['persona'] = self.session_state['persona']
      +                merged_node_info['trust'] = self.session_state['trust_mode']
      +                merged_node_info['memories'] = list(self.session_state['memories'])
      +                
      +                for k, v in overrides.items():
      +                    merged_node_info[k] = v
      +
      +                # Enrich question
      +                past = self.history.get_strings()
      +                if len(past) > 1:
      +                    clean_past = [q for q in past[-6:-1] if not q.startswith('/')]
      +                    if clean_past:
      +                        history_text = "\n".join(f"- {q}" for q in clean_past)
      +                        clean_question = f"Previous questions:\n{history_text}\n\nCurrent Question:\n{clean_question}"
      +
      +                # 3. AI Execution
      +                # Use persona from overrides (one-shot) or from session state
      +                active_persona = merged_node_info.get('persona', self.session_state.get('persona', 'engineer'))
      +                persona_color = self._get_theme_color(active_persona, fallback="cyan")
      +                
      +                active_buffer = get_active_buffer()
      +                live_text = "Thinking..."
      +                panel = Panel(live_text, title=f"[bold {persona_color}]Copilot Guide[/bold {persona_color}]", border_style=persona_color)
      +                
      +                def on_chunk(text):
      +                    nonlocal live_text
      +                    if live_text == "Thinking...": live_text = ""
      +                    live_text += text
      +                
      +                with Live(panel, console=self.console, refresh_per_second=10) as live:
      +                    def update_live(t):
      +                        live.update(Panel(Markdown(t), title=f"[bold {persona_color}]Copilot Guide[/bold {persona_color}]", border_style=persona_color))
      +
      +                    wrapped_chunk = lambda t: (on_chunk(t), update_live(live_text))
      +                    
      +                    # Check for interruption during AI call
      +                    ai_task = asyncio.create_task(on_ai_call(active_buffer, clean_question, wrapped_chunk, merged_node_info))
      +                    
      +                    try:
      +                        while not ai_task.done():
      +                            await asyncio.sleep(0.05)
      +                        result = await ai_task
      +                    except asyncio.CancelledError:
      +                        return "cancel", None, None
      +
      +                if not result or result.get("error"):
      +                    if result and result.get("error"): self.console.print(f"[red]Error: {result['error']}[/red]")
      +                    return "cancel", None, None
      +
      +                # 4. Handle result
      +                if live_text == "Thinking..." and result.get("guide"):
      +                    self.console.print(Panel(Markdown(result["guide"]), title=f"[bold {persona_color}]Copilot Guide[/bold {persona_color}]", border_style=persona_color))
      +
      +                commands = result.get("commands", [])
      +                if not commands:
      +                    self.console.print("")
      +                    return "continue", None, None
      +
      +                risk = result.get("risk_level", "low")
      +                risk_style = {"low": "success", "high": "warning", "destructive": "error"}.get(risk, "success")
      +                style_color = self._get_theme_color(risk_style, fallback="green")
      +                
      +                cmd_text = "\n".join(f"  {i+1}. {c}" for i, c in enumerate(commands))
      +                # Explicitly use 'bold style_color' for both TITLE and BORDER to ensure maximum consistency
      +                self.console.print(Panel(cmd_text, title=f"[bold {style_color}]Suggested Commands [{risk.upper()}][/bold {style_color}]", border_style=f"bold {style_color}"))
      +
      +                if merged_node_info.get('trust', False) and risk != "destructive":
      +                    self.console.print(f"[dim]βš™οΈ Auto-executing (Trust Mode)[/dim]")
      +                    return "send_all", commands, None
      +
      +                confirm_session = PromptSession(input=self.pt_input, output=self.pt_output)
      +                c_bindings = KeyBindings()
      +                @c_bindings.add('escape', eager=True)
      +                @c_bindings.add('c-c')
      +                def _(ev): ev.app.exit(result='n')
      +                
      +                import html
      +                try:
      +                    p_text = html.escape(f"Send? (y/n/e/range) [n]: ")
      +                    # Use the EXACT same style_color and force bold="true" for Prompt-Toolkit
      +                    action = await confirm_session.prompt_async(HTML(f'<style fg="{style_color}" bold="true">{p_text}</style>'), key_bindings=c_bindings)
      +                except (KeyboardInterrupt, EOFError):
      +                    self.console.print("")
      +                    return "continue", None, None
      +
      +                def parse_indices(text, max_len):
      +                    """Helper to parse '1-3, 5, 7' into [0, 1, 2, 4, 6]."""
      +                    indices = []
      +                    # Replace commas with spaces and split
      +                    parts = text.replace(',', ' ').split()
      +                    for part in parts:
      +                        if '-' in part:
      +                            try:
      +                                start, end = map(int, part.split('-'))
      +                                # Ensure inclusive and 0-indexed
      +                                indices.extend(range(start-1, end))
      +                            except: continue
      +                        elif part.isdigit():
      +                            indices.append(int(part)-1)
      +                    # Filter valid indices and remove duplicates
      +                    return [i for i in sorted(set(indices)) if 0 <= i < max_len]
      +
      +                action_l = (action or "n").lower().strip()
      +                if action_l in ('y', 'yes', 'all'):
      +                    return "send_all", commands, None
      +                
      +                # Check for numeric selection (e.g., "1, 2-4")
      +                if re.match(r'^[0-9,\-\s]+$', action_l):
      +                    selected_idxs = parse_indices(action_l, len(commands))
      +                    if selected_idxs:
      +                        return "send_all", [commands[i] for i in selected_idxs], None
      +
      +                elif action_l.startswith('e'):
      +                    # Check if it's a selective edit like 'e1-2'
      +                    selection_str = action_l[1:].strip()
      +                    if selection_str:
      +                        idxs = parse_indices(selection_str, len(commands))
      +                        cmds_to_edit = [commands[i] for i in idxs] if idxs else commands
      +                    else:
      +                        cmds_to_edit = commands
      +
      +                    target = "\n".join(cmds_to_edit)
      +                    e_bindings = KeyBindings()
      +                    @e_bindings.add('c-j')
      +                    def _(ev): ev.app.exit(result=ev.app.current_buffer.text)
      +                    @e_bindings.add('escape', 'enter')
      +                    def _(ev): ev.app.exit(result=ev.app.current_buffer.text)
      +                    @e_bindings.add('escape')
      +                    def _(ev): ev.app.exit(result='')
      +                    
      +                    c_edit = self._get_theme_color("user_prompt", "cyan")
      +                    import html
      +                    e_text = html.escape("Edit (Ctrl+Enter or Esc+Enter to submit):\n")
      +                    try:
      +                        edited = await confirm_session.prompt_async(
      +                            HTML(f'<style fg="{c_edit}">{e_text}</style>'),
      +                            default=target, multiline=True, key_bindings=e_bindings
      +                        )
      +                    except (KeyboardInterrupt, EOFError):
      +                        self.console.print("")
      +                        return "continue", None, None
      +
      +                    if edited and edited.strip():
      +                        # Split by lines to ensure core.py applies delay between each command
      +                        lines = [l.strip() for l in edited.split('\n') if l.strip()]
      +                        return "custom", None, lines
      +                    
      +                self.console.print("")
      +                return "continue", None, None
      +            
      +            return "cancel", None, None
      +
      +        finally:
      +            state['cancelled'] = True
      +            self.console.print("[dim]Returning to session...[/dim]")
      +
      +
      +

      Methods

      +
      +
      +async def run_session(self,
      raw_bytes:Β bytes,
      cmd_byte_positions:Β List[tuple],
      node_info:Β dict,
      on_ai_call:Β Callable)
      +
      +
      +
      + +Expand source code + +
      async def run_session(self, 
      +                      raw_bytes: bytes, 
      +                      cmd_byte_positions: List[tuple], 
      +                      node_info: dict,
      +                      on_ai_call: Callable):
      +    """
      +    Runs the interactive Copilot session.
      +    on_ai_call: async function(active_buffer, question) -> result_dict
      +    """
      +    from rich.rule import Rule
      +    
      +    try:
      +        # Prepare UI state
      +        buffer = log_cleaner(raw_bytes.decode(errors='replace'))
      +        blocks = self.ai_service.build_context_blocks(raw_bytes, cmd_byte_positions, node_info)
      +        last_line = buffer.split('\n')[-1].strip() if buffer.strip() else "(prompt)"
      +        blocks.append((len(raw_bytes), last_line[:80]))
      +        
      +        state = {
      +            'context_cmd': 1,
      +            'total_cmds': len(blocks),
      +            'total_lines': len(buffer.split('\n')),
      +            'context_lines': min(50, len(buffer.split('\n'))),
      +            'context_mode': self.mode_range,
      +            'cancelled': False,
      +            'toolbar_msg': '',
      +            'msg_expiry': 0
      +        }
      +        
      +        # 1. Visual Separation
      +        self.console.print("") # Salto de lΓ­nea real
      +        self.console.print(Rule(title="[bold cyan] AI TERMINAL COPILOT [/bold cyan]", style="cyan"))
      +        self.console.print(Panel(
      +            "[dim]Type your question. Enter to send, Escape/Ctrl+C to cancel.\n"
      +            "Tab to change context mode. Ctrl+\u2191/\u2193 to adjust context. \u2191\u2193 for question history.[/dim]",
      +            border_style="cyan"
      +        ))
      +        self.console.print("\n") # PequeΓ±o espacio antes del prompt del copilot
      +
      +        bindings = KeyBindings()
      +        @bindings.add('c-up')
      +        def _(event):
      +            if state['context_mode'] == self.mode_lines:
      +                state['context_lines'] = min(state['context_lines'] + 50, state['total_lines'])
      +            else:
      +                state['context_cmd'] = min(state['context_cmd'] + 1, state['total_cmds'])
      +            event.app.invalidate()
      +        @bindings.add('c-down')
      +        def _(event):
      +            if state['context_mode'] == self.mode_lines:
      +                state['context_lines'] = max(state['context_lines'] - 50, min(50, state['total_lines']))
      +            else:
      +                state['context_cmd'] = max(state['context_cmd'] - 1, 1)
      +            event.app.invalidate()
      +        @bindings.add('tab')
      +        def _(event):
      +            buf = event.current_buffer
      +            # If typing a slash command (no spaces yet), use tab to autocomplete inline
      +            if buf.text.startswith('/') and ' ' not in buf.text:
      +                buf.complete_next()
      +            else:
      +                state['context_mode'] = (state['context_mode'] + 1) % 3
      +                event.app.invalidate()
      +        @bindings.add('escape', eager=True)
      +        @bindings.add('c-c')
      +        def _(event):
      +            state['cancelled'] = True
      +            event.app.exit(result='')
      +
      +        def get_active_buffer():
      +            if state['context_mode'] == self.mode_lines:
      +                return '\n'.join(buffer.split('\n')[-state['context_lines']:])
      +            idx = max(0, state['total_cmds'] - state['context_cmd'])
      +            start, preview = blocks[idx]
      +            if state['context_mode'] == self.mode_single and idx + 1 < state['total_cmds']:
      +                end = blocks[idx + 1][0]
      +                active_raw = raw_bytes[start:end]
      +            else:
      +                active_raw = raw_bytes[start:]
      +            return preview + "\n" + log_cleaner(active_raw.decode(errors='replace'))
      +
      +        def get_prompt_text():
      +            import html
      +            # Always use user_prompt color for the Ask prompt
      +            color = self._get_theme_color("user_prompt", "cyan")
      +            
      +            if state['context_mode'] == self.mode_lines:
      +                text = html.escape(f"Ask [Ctx: {state['context_lines']}/{state['total_lines']}L]: ")
      +                return HTML(f'<style fg="{color}">{text}</style>')
      +            active = get_active_buffer()
      +            lines_count = len(active.split('\n'))
      +            mode_str = {self.mode_range: "Range", self.mode_single: "Cmd"}[state['context_mode']]
      +            text = html.escape(f"Ask [{mode_str} {state['context_cmd']} ~{lines_count}L]: ")
      +            return HTML(f'<style fg="{color}">{text}</style>')
      +
      +        from prompt_toolkit.application.current import get_app
      +
      +        def get_toolbar():
      +            import html
      +            app = get_app()
      +            c_warning = self._get_theme_color("warning", "yellow")
      +            
      +            if app and app.current_buffer:
      +                text = app.current_buffer.text
      +                # Solo mostrar ayuda de comandos si estamos escribiendo el primer comando y no hay espacios
      +                if text.startswith('/') and ' ' not in text:
      +                    commands = ['/os', '/prompt', '/architect', '/engineer', '/trust', '/untrust', '/memorize', '/clear']
      +                    matches = [c for c in commands if c.startswith(text.lower())]
      +                    if matches:
      +                        m_text = html.escape(f"Available: {' '.join(matches)}")
      +                        return HTML(f'<style fg="{c_warning}">{m_text}</style>' + " " * 20)
      +
      +            m_label = {self.mode_range: "RANGE", self.mode_single: "SINGLE", self.mode_lines: "LINES"}[state['context_mode']]
      +            if state['context_mode'] == self.mode_lines:
      +                base_str = f'\u25b6 Ctrl+\u2191/\u2193 adjusts by 50 lines  [Tab: {m_label}]'
      +            else:
      +                idx = max(0, state['total_cmds'] - state['context_cmd'])
      +                desc = blocks[idx][1]
      +                base_str = f'\u25b6 {desc}  [Tab: {m_label}]'
      +            
      +            # Wrap base_str in a style to maintain consistency and avoid glitches
      +            # The fg color will be inherited from bottom-toolbar global style if not specified here
      +            base_html = f'<span>{html.escape(base_str)}</span>'
      +            
      +            res_html = base_html
      +            if state.get('toolbar_msg'):
      +                if time.time() < state.get('msg_expiry', 0):
      +                    msg = html.escape(state['toolbar_msg'])
      +                    res_html = f'<style fg="{c_warning}">βš™οΈ {msg}</style> | ' + base_html
      +                else:
      +                    state['toolbar_msg'] = ''
      +            
      +            # Pad with spaces to ensure the line is cleared when the message disappears
      +            return HTML(res_html + " " * 20)
      +
      +        from prompt_toolkit.completion import Completer, Completion
      +        class SlashCommandCompleter(Completer):
      +            def get_completions(self, document, complete_event):
      +                text = document.text_before_cursor
      +                if text.startswith('/'):
      +                    parts = text.split()
      +                    # Only autocomplete the first word
      +                    if len(parts) <= 1 or (len(parts) == 1 and not text.endswith(' ')):
      +                        cmd_part = parts[0] if parts else text
      +                        commands = [
      +                            ('/os', 'Set device OS (e.g. cisco_ios)'),
      +                            ('/prompt', 'Override prompt regex'),
      +                            ('/architect', 'Switch to Architect persona'),
      +                            ('/engineer', 'Switch to Engineer persona'),
      +                            ('/trust', 'Enable auto-execute'),
      +                            ('/untrust', 'Disable auto-execute'),
      +                            ('/memorize', 'Add fact to memory'),
      +                            ('/clear', 'Clear memory')
      +                        ]
      +                        for cmd, desc in commands:
      +                            if cmd.startswith(cmd_part.lower()):
      +                                yield Completion(cmd, start_position=-len(cmd_part), display_meta=desc)
      +
      +        copilot_completer = SlashCommandCompleter()
      +
      +        while True:
      +            # 2. Ask question
      +            from prompt_toolkit.styles import Style
      +            c_contrast = self._get_theme_color("contrast", "gray")
      +            ui_style = Style.from_dict({
      +                'bottom-toolbar': f'fg:{c_contrast}',
      +            })
      +            
      +            session = PromptSession(
      +                history=self.history, 
      +                input=self.pt_input, 
      +                output=self.pt_output,
      +                completer=copilot_completer,
      +                reserve_space_for_menu=0,
      +                style=ui_style
      +            )
      +            try:
      +                # Usamos un try/finally interno para asegurar que si algo falla en prompt_async,
      +                # no nos quedemos con la terminal en un estado extraΓ±o.
      +                question = await session.prompt_async(
      +                    get_prompt_text, 
      +                    key_bindings=bindings, 
      +                    bottom_toolbar=get_toolbar
      +                )
      +            except (KeyboardInterrupt, EOFError):
      +                state['cancelled'] = True
      +                question = ""
      +            
      +            if state['cancelled'] or not question.strip() or question.strip().lower() in ['cancel', 'exit', 'quit']:
      +                return "cancel", None, None
      +
      +            # 3. Process Input via AIService
      +            directive = self.ai_service.process_copilot_input(question, self.session_state)
      +            
      +            if directive["action"] == "state_update":
      +                state['toolbar_msg'] = directive['message']
      +                state['msg_expiry'] = time.time() + 3 # 3 seconds timeout
      +                
      +                async def delayed_refresh():
      +                    await asyncio.sleep(3.1)
      +                    # Only invalidate if the message hasn't been replaced by a newer one
      +                    if state.get('toolbar_msg') == directive['message']:
      +                        state['toolbar_msg'] = '' # Explicitly clear
      +                        try:
      +                            from prompt_toolkit.application.current import get_app
      +                            app = get_app()
      +                            if app: app.invalidate()
      +                        except: pass
      +                asyncio.create_task(delayed_refresh())
      +
      +                # Mover el cursor arriba y limpiar la lΓ­nea para que el nuevo prompt reemplace al anterior
      +                sys.stdout.write('\x1b[1A\x1b[2K')
      +                sys.stdout.flush()
      +                continue
      +            else:
      +                # Limpiar el mensaje de la barra cuando se hace una pregunta real
      +                state['toolbar_msg'] = ''
      +            
      +            clean_question = directive.get("clean_prompt", question)
      +            overrides = directive.get("overrides", {})
      +            
      +            # Merge node_info with session_state and overrides
      +            merged_node_info = node_info.copy()
      +            if self.session_state['os']: merged_node_info['os'] = self.session_state['os']
      +            if self.session_state['prompt']: merged_node_info['prompt'] = self.session_state['prompt']
      +            merged_node_info['persona'] = self.session_state['persona']
      +            merged_node_info['trust'] = self.session_state['trust_mode']
      +            merged_node_info['memories'] = list(self.session_state['memories'])
      +            
      +            for k, v in overrides.items():
      +                merged_node_info[k] = v
      +
      +            # Enrich question
      +            past = self.history.get_strings()
      +            if len(past) > 1:
      +                clean_past = [q for q in past[-6:-1] if not q.startswith('/')]
      +                if clean_past:
      +                    history_text = "\n".join(f"- {q}" for q in clean_past)
      +                    clean_question = f"Previous questions:\n{history_text}\n\nCurrent Question:\n{clean_question}"
      +
      +            # 3. AI Execution
      +            # Use persona from overrides (one-shot) or from session state
      +            active_persona = merged_node_info.get('persona', self.session_state.get('persona', 'engineer'))
      +            persona_color = self._get_theme_color(active_persona, fallback="cyan")
      +            
      +            active_buffer = get_active_buffer()
      +            live_text = "Thinking..."
      +            panel = Panel(live_text, title=f"[bold {persona_color}]Copilot Guide[/bold {persona_color}]", border_style=persona_color)
      +            
      +            def on_chunk(text):
      +                nonlocal live_text
      +                if live_text == "Thinking...": live_text = ""
      +                live_text += text
      +            
      +            with Live(panel, console=self.console, refresh_per_second=10) as live:
      +                def update_live(t):
      +                    live.update(Panel(Markdown(t), title=f"[bold {persona_color}]Copilot Guide[/bold {persona_color}]", border_style=persona_color))
      +
      +                wrapped_chunk = lambda t: (on_chunk(t), update_live(live_text))
      +                
      +                # Check for interruption during AI call
      +                ai_task = asyncio.create_task(on_ai_call(active_buffer, clean_question, wrapped_chunk, merged_node_info))
      +                
      +                try:
      +                    while not ai_task.done():
      +                        await asyncio.sleep(0.05)
      +                    result = await ai_task
      +                except asyncio.CancelledError:
      +                    return "cancel", None, None
      +
      +            if not result or result.get("error"):
      +                if result and result.get("error"): self.console.print(f"[red]Error: {result['error']}[/red]")
      +                return "cancel", None, None
      +
      +            # 4. Handle result
      +            if live_text == "Thinking..." and result.get("guide"):
      +                self.console.print(Panel(Markdown(result["guide"]), title=f"[bold {persona_color}]Copilot Guide[/bold {persona_color}]", border_style=persona_color))
      +
      +            commands = result.get("commands", [])
      +            if not commands:
      +                self.console.print("")
      +                return "continue", None, None
      +
      +            risk = result.get("risk_level", "low")
      +            risk_style = {"low": "success", "high": "warning", "destructive": "error"}.get(risk, "success")
      +            style_color = self._get_theme_color(risk_style, fallback="green")
      +            
      +            cmd_text = "\n".join(f"  {i+1}. {c}" for i, c in enumerate(commands))
      +            # Explicitly use 'bold style_color' for both TITLE and BORDER to ensure maximum consistency
      +            self.console.print(Panel(cmd_text, title=f"[bold {style_color}]Suggested Commands [{risk.upper()}][/bold {style_color}]", border_style=f"bold {style_color}"))
      +
      +            if merged_node_info.get('trust', False) and risk != "destructive":
      +                self.console.print(f"[dim]βš™οΈ Auto-executing (Trust Mode)[/dim]")
      +                return "send_all", commands, None
      +
      +            confirm_session = PromptSession(input=self.pt_input, output=self.pt_output)
      +            c_bindings = KeyBindings()
      +            @c_bindings.add('escape', eager=True)
      +            @c_bindings.add('c-c')
      +            def _(ev): ev.app.exit(result='n')
      +            
      +            import html
      +            try:
      +                p_text = html.escape(f"Send? (y/n/e/range) [n]: ")
      +                # Use the EXACT same style_color and force bold="true" for Prompt-Toolkit
      +                action = await confirm_session.prompt_async(HTML(f'<style fg="{style_color}" bold="true">{p_text}</style>'), key_bindings=c_bindings)
      +            except (KeyboardInterrupt, EOFError):
      +                self.console.print("")
      +                return "continue", None, None
      +
      +            def parse_indices(text, max_len):
      +                """Helper to parse '1-3, 5, 7' into [0, 1, 2, 4, 6]."""
      +                indices = []
      +                # Replace commas with spaces and split
      +                parts = text.replace(',', ' ').split()
      +                for part in parts:
      +                    if '-' in part:
      +                        try:
      +                            start, end = map(int, part.split('-'))
      +                            # Ensure inclusive and 0-indexed
      +                            indices.extend(range(start-1, end))
      +                        except: continue
      +                    elif part.isdigit():
      +                        indices.append(int(part)-1)
      +                # Filter valid indices and remove duplicates
      +                return [i for i in sorted(set(indices)) if 0 <= i < max_len]
      +
      +            action_l = (action or "n").lower().strip()
      +            if action_l in ('y', 'yes', 'all'):
      +                return "send_all", commands, None
      +            
      +            # Check for numeric selection (e.g., "1, 2-4")
      +            if re.match(r'^[0-9,\-\s]+$', action_l):
      +                selected_idxs = parse_indices(action_l, len(commands))
      +                if selected_idxs:
      +                    return "send_all", [commands[i] for i in selected_idxs], None
      +
      +            elif action_l.startswith('e'):
      +                # Check if it's a selective edit like 'e1-2'
      +                selection_str = action_l[1:].strip()
      +                if selection_str:
      +                    idxs = parse_indices(selection_str, len(commands))
      +                    cmds_to_edit = [commands[i] for i in idxs] if idxs else commands
      +                else:
      +                    cmds_to_edit = commands
      +
      +                target = "\n".join(cmds_to_edit)
      +                e_bindings = KeyBindings()
      +                @e_bindings.add('c-j')
      +                def _(ev): ev.app.exit(result=ev.app.current_buffer.text)
      +                @e_bindings.add('escape', 'enter')
      +                def _(ev): ev.app.exit(result=ev.app.current_buffer.text)
      +                @e_bindings.add('escape')
      +                def _(ev): ev.app.exit(result='')
      +                
      +                c_edit = self._get_theme_color("user_prompt", "cyan")
      +                import html
      +                e_text = html.escape("Edit (Ctrl+Enter or Esc+Enter to submit):\n")
      +                try:
      +                    edited = await confirm_session.prompt_async(
      +                        HTML(f'<style fg="{c_edit}">{e_text}</style>'),
      +                        default=target, multiline=True, key_bindings=e_bindings
      +                    )
      +                except (KeyboardInterrupt, EOFError):
      +                    self.console.print("")
      +                    return "continue", None, None
      +
      +                if edited and edited.strip():
      +                    # Split by lines to ensure core.py applies delay between each command
      +                    lines = [l.strip() for l in edited.split('\n') if l.strip()]
      +                    return "custom", None, lines
      +                
      +            self.console.print("")
      +            return "continue", None, None
      +        
      +        return "cancel", None, None
      +
      +    finally:
      +        state['cancelled'] = True
      +        self.console.print("[dim]Returning to session...[/dim]")
      +
      +

      Runs the interactive Copilot session. on_ai_call: async function(active_buffer, question, on_chunk, node_info) -> result_dict

      +
      +
      +
      +
      +
      +
      + +
      + + + diff --git a/docs/connpy/cli/validators.html b/docs/connpy/cli/validators.html index ee2f6b6..3bbf7cd 100644 --- a/docs/connpy/cli/validators.html +++ b/docs/connpy/cli/validators.html @@ -3,7 +3,7 @@ - + connpy.cli.validators API documentation @@ -508,7 +508,7 @@ el.replaceWith(d); diff --git a/docs/connpy/grpc_layer/connpy_pb2.html b/docs/connpy/grpc_layer/connpy_pb2.html index 3f4a0e0..4d42e1f 100644 --- a/docs/connpy/grpc_layer/connpy_pb2.html +++ b/docs/connpy/grpc_layer/connpy_pb2.html @@ -3,7 +3,7 @@ - + connpy.grpc_layer.connpy_pb2 API documentation @@ -62,7 +62,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -81,7 +81,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -100,7 +100,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -119,7 +119,45 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      +
      +
      + +
      +class CopilotRequest +(*args, **kwargs) +
      +
      +

      A ProtocolMessage

      +

      Ancestors

      +
        +
      • google._upb._message.Message
      • +
      • google.protobuf.message.Message
      • +
      +

      Class variables

      +
      +
      var DESCRIPTOR
      +
      +
      +
      +
      +
      +
      +class CopilotResponse +(*args, **kwargs) +
      +
      +

      A ProtocolMessage

      +

      Ancestors

      +
        +
      • google._upb._message.Message
      • +
      • google.protobuf.message.Message
      • +
      +

      Class variables

      +
      +
      var DESCRIPTOR
      +
      +
      @@ -138,7 +176,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -157,7 +195,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -176,7 +214,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -195,7 +233,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -214,7 +252,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -233,7 +271,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -252,7 +290,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -271,7 +309,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -290,7 +328,26 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      +
      +
      + +
      +class MCPRequest +(*args, **kwargs) +
      +
      +

      A ProtocolMessage

      +

      Ancestors

      +
        +
      • google._upb._message.Message
      • +
      • google.protobuf.message.Message
      • +
      +

      Class variables

      +
      +
      var DESCRIPTOR
      +
      +
      @@ -309,7 +366,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -328,7 +385,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -347,7 +404,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -366,7 +423,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -385,7 +442,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -404,7 +461,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -423,7 +480,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -442,7 +499,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -461,7 +518,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -480,7 +537,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -499,7 +556,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -518,7 +575,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -537,7 +594,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -556,7 +613,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -575,7 +632,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -594,7 +651,7 @@ el.replaceWith(d);
      var DESCRIPTOR
      -

      The type of the None singleton.

      +
      @@ -638,6 +695,18 @@ el.replaceWith(d);
  • +

    CopilotRequest

    + +
  • +
  • +

    CopilotResponse

    + +
  • +
  • DeleteRequest

    • DESCRIPTOR
    • @@ -692,6 +761,12 @@ el.replaceWith(d);
  • +

    MCPRequest

    + +
  • +
  • MessageValue

    • DESCRIPTOR
    • @@ -793,7 +868,7 @@ el.replaceWith(d); diff --git a/docs/connpy/grpc_layer/connpy_pb2_grpc.html b/docs/connpy/grpc_layer/connpy_pb2_grpc.html index 7554fb0..c01da87 100644 --- a/docs/connpy/grpc_layer/connpy_pb2_grpc.html +++ b/docs/connpy/grpc_layer/connpy_pb2_grpc.html @@ -3,7 +3,7 @@ - + connpy.grpc_layer.connpy_pb2_grpc API documentation @@ -57,33 +57,43 @@ el.replaceWith(d); rpc_method_handlers = { 'ask': grpc.stream_stream_rpc_method_handler( servicer.ask, - request_deserializer=connpy__pb2.AskRequest.FromString, - response_serializer=connpy__pb2.AIResponse.SerializeToString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.AskRequest.FromString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.AIResponse.SerializeToString, ), 'confirm': grpc.unary_unary_rpc_method_handler( servicer.confirm, - request_deserializer=connpy__pb2.StringRequest.FromString, - response_serializer=connpy__pb2.BoolResponse.SerializeToString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.StringRequest.FromString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.BoolResponse.SerializeToString, + ), + 'ask_copilot': grpc.unary_unary_rpc_method_handler( + servicer.ask_copilot, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.CopilotRequest.FromString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.CopilotResponse.SerializeToString, ), 'list_sessions': grpc.unary_unary_rpc_method_handler( servicer.list_sessions, request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=connpy__pb2.ValueResponse.SerializeToString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.ValueResponse.SerializeToString, ), 'delete_session': grpc.unary_unary_rpc_method_handler( servicer.delete_session, - request_deserializer=connpy__pb2.StringRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.StringRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 
'configure_provider': grpc.unary_unary_rpc_method_handler( servicer.configure_provider, - request_deserializer=connpy__pb2.ProviderRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.ProviderRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + 'configure_mcp': grpc.unary_unary_rpc_method_handler( + servicer.configure_mcp, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.MCPRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'load_session_data': grpc.unary_unary_rpc_method_handler( servicer.load_session_data, - request_deserializer=connpy__pb2.StringRequest.FromString, - response_serializer=connpy__pb2.StructResponse.SerializeToString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.StringRequest.FromString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.StructResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( @@ -106,32 +116,32 @@ el.replaceWith(d); 'get_settings': grpc.unary_unary_rpc_method_handler( servicer.get_settings, request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=connpy__pb2.StructResponse.SerializeToString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.StructResponse.SerializeToString, ), 'get_default_dir': grpc.unary_unary_rpc_method_handler( servicer.get_default_dir, request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=connpy__pb2.StringResponse.SerializeToString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.StringResponse.SerializeToString, ), 'set_config_folder': grpc.unary_unary_rpc_method_handler( servicer.set_config_folder, - request_deserializer=connpy__pb2.StringRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.StringRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 
'update_setting': grpc.unary_unary_rpc_method_handler( servicer.update_setting, - request_deserializer=connpy__pb2.UpdateRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.UpdateRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'encrypt_password': grpc.unary_unary_rpc_method_handler( servicer.encrypt_password, - request_deserializer=connpy__pb2.StringRequest.FromString, - response_serializer=connpy__pb2.StringResponse.SerializeToString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.StringRequest.FromString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.StringResponse.SerializeToString, ), 'apply_theme_from_file': grpc.unary_unary_rpc_method_handler( servicer.apply_theme_from_file, - request_deserializer=connpy__pb2.StringRequest.FromString, - response_serializer=connpy__pb2.StructResponse.SerializeToString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.StringRequest.FromString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.StructResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( @@ -153,23 +163,23 @@ el.replaceWith(d); rpc_method_handlers = { 'run_commands': grpc.unary_stream_rpc_method_handler( servicer.run_commands, - request_deserializer=connpy__pb2.RunRequest.FromString, - response_serializer=connpy__pb2.NodeRunResult.SerializeToString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.RunRequest.FromString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.NodeRunResult.SerializeToString, ), 'test_commands': grpc.unary_stream_rpc_method_handler( servicer.test_commands, - request_deserializer=connpy__pb2.TestRequest.FromString, - response_serializer=connpy__pb2.NodeRunResult.SerializeToString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.TestRequest.FromString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.NodeRunResult.SerializeToString, ), 'run_cli_script': 
grpc.unary_unary_rpc_method_handler( servicer.run_cli_script, - request_deserializer=connpy__pb2.ScriptRequest.FromString, - response_serializer=connpy__pb2.StructResponse.SerializeToString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.ScriptRequest.FromString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.StructResponse.SerializeToString, ), 'run_yaml_playbook': grpc.unary_unary_rpc_method_handler( servicer.run_yaml_playbook, - request_deserializer=connpy__pb2.ScriptRequest.FromString, - response_serializer=connpy__pb2.StructResponse.SerializeToString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.ScriptRequest.FromString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.StructResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( @@ -191,17 +201,17 @@ el.replaceWith(d); rpc_method_handlers = { 'export_to_file': grpc.unary_unary_rpc_method_handler( servicer.export_to_file, - request_deserializer=connpy__pb2.ExportRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.ExportRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'import_from_file': grpc.unary_unary_rpc_method_handler( servicer.import_from_file, - request_deserializer=connpy__pb2.StringRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.StringRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'set_reserved_names': grpc.unary_unary_rpc_method_handler( servicer.set_reserved_names, - request_deserializer=connpy__pb2.ListRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.ListRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), } @@ -224,23 +234,23 @@ el.replaceWith(d); rpc_method_handlers = { 'list_nodes': grpc.unary_unary_rpc_method_handler( servicer.list_nodes, - 
request_deserializer=connpy__pb2.FilterRequest.FromString, - response_serializer=connpy__pb2.ValueResponse.SerializeToString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.FilterRequest.FromString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.ValueResponse.SerializeToString, ), 'list_folders': grpc.unary_unary_rpc_method_handler( servicer.list_folders, - request_deserializer=connpy__pb2.FilterRequest.FromString, - response_serializer=connpy__pb2.ValueResponse.SerializeToString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.FilterRequest.FromString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.ValueResponse.SerializeToString, ), 'get_node_details': grpc.unary_unary_rpc_method_handler( servicer.get_node_details, - request_deserializer=connpy__pb2.IdRequest.FromString, - response_serializer=connpy__pb2.StructResponse.SerializeToString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.IdRequest.FromString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.StructResponse.SerializeToString, ), 'explode_unique': grpc.unary_unary_rpc_method_handler( servicer.explode_unique, - request_deserializer=connpy__pb2.IdRequest.FromString, - response_serializer=connpy__pb2.ValueResponse.SerializeToString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.IdRequest.FromString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.ValueResponse.SerializeToString, ), 'generate_cache': grpc.unary_unary_rpc_method_handler( servicer.generate_cache, @@ -249,53 +259,53 @@ el.replaceWith(d); ), 'add_node': grpc.unary_unary_rpc_method_handler( servicer.add_node, - request_deserializer=connpy__pb2.NodeRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.NodeRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'update_node': grpc.unary_unary_rpc_method_handler( servicer.update_node, - request_deserializer=connpy__pb2.NodeRequest.FromString, + 
request_deserializer=connpy_dot_proto_dot_connpy__pb2.NodeRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'delete_node': grpc.unary_unary_rpc_method_handler( servicer.delete_node, - request_deserializer=connpy__pb2.DeleteRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.DeleteRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'move_node': grpc.unary_unary_rpc_method_handler( servicer.move_node, - request_deserializer=connpy__pb2.MoveRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.MoveRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'bulk_add': grpc.unary_unary_rpc_method_handler( servicer.bulk_add, - request_deserializer=connpy__pb2.BulkRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.BulkRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'validate_parent_folder': grpc.unary_unary_rpc_method_handler( servicer.validate_parent_folder, - request_deserializer=connpy__pb2.IdRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.IdRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'set_reserved_names': grpc.unary_unary_rpc_method_handler( servicer.set_reserved_names, - request_deserializer=connpy__pb2.ListRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.ListRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'interact_node': grpc.stream_stream_rpc_method_handler( servicer.interact_node, - request_deserializer=connpy__pb2.InteractRequest.FromString, - response_serializer=connpy__pb2.InteractResponse.SerializeToString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.InteractRequest.FromString, + 
response_serializer=connpy_dot_proto_dot_connpy__pb2.InteractResponse.SerializeToString, ), 'full_replace': grpc.unary_unary_rpc_method_handler( servicer.full_replace, - request_deserializer=connpy__pb2.FullReplaceRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.FullReplaceRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'get_inventory': grpc.unary_unary_rpc_method_handler( servicer.get_inventory, request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=connpy__pb2.FullReplaceRequest.SerializeToString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.FullReplaceRequest.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( @@ -318,26 +328,26 @@ el.replaceWith(d); 'list_plugins': grpc.unary_unary_rpc_method_handler( servicer.list_plugins, request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=connpy__pb2.ValueResponse.SerializeToString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.ValueResponse.SerializeToString, ), 'add_plugin': grpc.unary_unary_rpc_method_handler( servicer.add_plugin, - request_deserializer=connpy__pb2.PluginRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.PluginRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'delete_plugin': grpc.unary_unary_rpc_method_handler( servicer.delete_plugin, - request_deserializer=connpy__pb2.IdRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.IdRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'enable_plugin': grpc.unary_unary_rpc_method_handler( servicer.enable_plugin, - request_deserializer=connpy__pb2.IdRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.IdRequest.FromString, 
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'disable_plugin': grpc.unary_unary_rpc_method_handler( servicer.disable_plugin, - request_deserializer=connpy__pb2.IdRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.IdRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), } @@ -360,32 +370,32 @@ el.replaceWith(d); rpc_method_handlers = { 'list_profiles': grpc.unary_unary_rpc_method_handler( servicer.list_profiles, - request_deserializer=connpy__pb2.FilterRequest.FromString, - response_serializer=connpy__pb2.ValueResponse.SerializeToString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.FilterRequest.FromString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.ValueResponse.SerializeToString, ), 'get_profile': grpc.unary_unary_rpc_method_handler( servicer.get_profile, - request_deserializer=connpy__pb2.ProfileRequest.FromString, - response_serializer=connpy__pb2.StructResponse.SerializeToString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.ProfileRequest.FromString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.StructResponse.SerializeToString, ), 'add_profile': grpc.unary_unary_rpc_method_handler( servicer.add_profile, - request_deserializer=connpy__pb2.NodeRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.NodeRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'resolve_node_data': grpc.unary_unary_rpc_method_handler( servicer.resolve_node_data, - request_deserializer=connpy__pb2.StructRequest.FromString, - response_serializer=connpy__pb2.StructResponse.SerializeToString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.StructRequest.FromString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.StructResponse.SerializeToString, ), 'delete_profile': grpc.unary_unary_rpc_method_handler( servicer.delete_profile, - 
request_deserializer=connpy__pb2.IdRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.IdRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'update_profile': grpc.unary_unary_rpc_method_handler( servicer.update_profile, - request_deserializer=connpy__pb2.NodeRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.NodeRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), } @@ -408,12 +418,12 @@ el.replaceWith(d); rpc_method_handlers = { 'start_api': grpc.unary_unary_rpc_method_handler( servicer.start_api, - request_deserializer=connpy__pb2.IntRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.IntRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'debug_api': grpc.unary_unary_rpc_method_handler( servicer.debug_api, - request_deserializer=connpy__pb2.IntRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.IntRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'stop_api': grpc.unary_unary_rpc_method_handler( @@ -423,13 +433,13 @@ el.replaceWith(d); ), 'restart_api': grpc.unary_unary_rpc_method_handler( servicer.restart_api, - request_deserializer=connpy__pb2.IntRequest.FromString, + request_deserializer=connpy_dot_proto_dot_connpy__pb2.IntRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'get_api_status': grpc.unary_unary_rpc_method_handler( servicer.get_api_status, request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - response_serializer=connpy__pb2.BoolResponse.SerializeToString, + response_serializer=connpy_dot_proto_dot_connpy__pb2.BoolResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( @@ -470,8 +480,8 @@ el.replaceWith(d); request_iterator, target, 
'/connpy.AIService/ask', - connpy__pb2.AskRequest.SerializeToString, - connpy__pb2.AIResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.AskRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.AIResponse.FromString, options, channel_credentials, insecure, @@ -497,8 +507,35 @@ el.replaceWith(d); request, target, '/connpy.AIService/confirm', - connpy__pb2.StringRequest.SerializeToString, - connpy__pb2.BoolResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.BoolResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ask_copilot(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/connpy.AIService/ask_copilot', + connpy_dot_proto_dot_connpy__pb2.CopilotRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.CopilotResponse.FromString, options, channel_credentials, insecure, @@ -525,7 +562,7 @@ el.replaceWith(d); target, '/connpy.AIService/list_sessions', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - connpy__pb2.ValueResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.ValueResponse.FromString, options, channel_credentials, insecure, @@ -551,7 +588,7 @@ el.replaceWith(d); request, target, '/connpy.AIService/delete_session', - connpy__pb2.StringRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -578,7 +615,34 @@ el.replaceWith(d); request, target, '/connpy.AIService/configure_provider', - connpy__pb2.ProviderRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.ProviderRequest.SerializeToString, + 
google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def configure_mcp(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/connpy.AIService/configure_mcp', + connpy_dot_proto_dot_connpy__pb2.MCPRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -605,8 +669,8 @@ el.replaceWith(d); request, target, '/connpy.AIService/load_session_data', - connpy__pb2.StringRequest.SerializeToString, - connpy__pb2.StructResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, options, channel_credentials, insecure, @@ -643,8 +707,82 @@ def ask(request_iterator, request_iterator, target, '/connpy.AIService/ask', - connpy__pb2.AskRequest.SerializeToString, - connpy__pb2.AIResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.AskRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.AIResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + +
      + +
      +def ask_copilot(request,
      target,
      options=(),
      channel_credentials=None,
      call_credentials=None,
      insecure=False,
      compression=None,
      wait_for_ready=None,
      timeout=None,
      metadata=None)
      +
      +
      +
      + +Expand source code + +
      @staticmethod
      +def ask_copilot(request,
      +        target,
      +        options=(),
      +        channel_credentials=None,
      +        call_credentials=None,
      +        insecure=False,
      +        compression=None,
      +        wait_for_ready=None,
      +        timeout=None,
      +        metadata=None):
      +    return grpc.experimental.unary_unary(
      +        request,
      +        target,
      +        '/connpy.AIService/ask_copilot',
      +        connpy_dot_proto_dot_connpy__pb2.CopilotRequest.SerializeToString,
      +        connpy_dot_proto_dot_connpy__pb2.CopilotResponse.FromString,
      +        options,
      +        channel_credentials,
      +        insecure,
      +        call_credentials,
      +        compression,
      +        wait_for_ready,
      +        timeout,
      +        metadata,
      +        _registered_method=True)
      +
      +
      +
      +
      +def configure_mcp(request,
      target,
      options=(),
      channel_credentials=None,
      call_credentials=None,
      insecure=False,
      compression=None,
      wait_for_ready=None,
      timeout=None,
      metadata=None)
      +
      +
      +
      + +Expand source code + +
      @staticmethod
      +def configure_mcp(request,
      +        target,
      +        options=(),
      +        channel_credentials=None,
      +        call_credentials=None,
      +        insecure=False,
      +        compression=None,
      +        wait_for_ready=None,
      +        timeout=None,
      +        metadata=None):
      +    return grpc.experimental.unary_unary(
      +        request,
      +        target,
      +        '/connpy.AIService/configure_mcp',
      +        connpy_dot_proto_dot_connpy__pb2.MCPRequest.SerializeToString,
      +        google_dot_protobuf_dot_empty__pb2.Empty.FromString,
               options,
               channel_credentials,
               insecure,
      @@ -680,7 +818,7 @@ def configure_provider(request,
               request,
               target,
               '/connpy.AIService/configure_provider',
      -        connpy__pb2.ProviderRequest.SerializeToString,
      +        connpy_dot_proto_dot_connpy__pb2.ProviderRequest.SerializeToString,
               google_dot_protobuf_dot_empty__pb2.Empty.FromString,
               options,
               channel_credentials,
      @@ -717,8 +855,8 @@ def confirm(request,
               request,
               target,
               '/connpy.AIService/confirm',
      -        connpy__pb2.StringRequest.SerializeToString,
      -        connpy__pb2.BoolResponse.FromString,
      +        connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString,
      +        connpy_dot_proto_dot_connpy__pb2.BoolResponse.FromString,
               options,
               channel_credentials,
               insecure,
      @@ -754,7 +892,7 @@ def delete_session(request,
               request,
               target,
               '/connpy.AIService/delete_session',
      -        connpy__pb2.StringRequest.SerializeToString,
      +        connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString,
               google_dot_protobuf_dot_empty__pb2.Empty.FromString,
               options,
               channel_credentials,
      @@ -792,7 +930,7 @@ def list_sessions(request,
               target,
               '/connpy.AIService/list_sessions',
               google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
      -        connpy__pb2.ValueResponse.FromString,
      +        connpy_dot_proto_dot_connpy__pb2.ValueResponse.FromString,
               options,
               channel_credentials,
               insecure,
      @@ -828,8 +966,8 @@ def load_session_data(request,
               request,
               target,
               '/connpy.AIService/load_session_data',
      -        connpy__pb2.StringRequest.SerializeToString,
      -        connpy__pb2.StructResponse.FromString,
      +        connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString,
      +        connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString,
               options,
               channel_credentials,
               insecure,
      @@ -867,6 +1005,12 @@ def load_session_data(request,
               context.set_details('Method not implemented!')
               raise NotImplementedError('Method not implemented!')
       
      +    def ask_copilot(self, request, context):
      +        """Missing associated documentation comment in .proto file."""
      +        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      +        context.set_details('Method not implemented!')
      +        raise NotImplementedError('Method not implemented!')
      +
           def list_sessions(self, request, context):
               """Missing associated documentation comment in .proto file."""
               context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      @@ -885,6 +1029,12 @@ def load_session_data(request,
               context.set_details('Method not implemented!')
               raise NotImplementedError('Method not implemented!')
       
      +    def configure_mcp(self, request, context):
      +        """Missing associated documentation comment in .proto file."""
      +        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      +        context.set_details('Method not implemented!')
      +        raise NotImplementedError('Method not implemented!')
      +
           def load_session_data(self, request, context):
               """Missing associated documentation comment in .proto file."""
               context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      @@ -914,6 +1064,38 @@ def load_session_data(request,
       

      Missing associated documentation comment in .proto file.

      +
      +def ask_copilot(self, request, context) +
      +
      +
      + +Expand source code + +
      def ask_copilot(self, request, context):
      +    """Missing associated documentation comment in .proto file."""
      +    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      +    context.set_details('Method not implemented!')
      +    raise NotImplementedError('Method not implemented!')
      +
      +

      Missing associated documentation comment in .proto file.

      +
      +
      +def configure_mcp(self, request, context) +
      +
      +
      + +Expand source code + +
      def configure_mcp(self, request, context):
      +    """Missing associated documentation comment in .proto file."""
      +    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
      +    context.set_details('Method not implemented!')
      +    raise NotImplementedError('Method not implemented!')
      +
      +

      Missing associated documentation comment in .proto file.

      +
      def configure_provider(self, request, context)
      @@ -1016,33 +1198,43 @@ def load_session_data(request, """ self.ask = channel.stream_stream( '/connpy.AIService/ask', - request_serializer=connpy__pb2.AskRequest.SerializeToString, - response_deserializer=connpy__pb2.AIResponse.FromString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.AskRequest.SerializeToString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.AIResponse.FromString, _registered_method=True) self.confirm = channel.unary_unary( '/connpy.AIService/confirm', - request_serializer=connpy__pb2.StringRequest.SerializeToString, - response_deserializer=connpy__pb2.BoolResponse.FromString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.BoolResponse.FromString, + _registered_method=True) + self.ask_copilot = channel.unary_unary( + '/connpy.AIService/ask_copilot', + request_serializer=connpy_dot_proto_dot_connpy__pb2.CopilotRequest.SerializeToString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.CopilotResponse.FromString, _registered_method=True) self.list_sessions = channel.unary_unary( '/connpy.AIService/list_sessions', request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=connpy__pb2.ValueResponse.FromString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.ValueResponse.FromString, _registered_method=True) self.delete_session = channel.unary_unary( '/connpy.AIService/delete_session', - request_serializer=connpy__pb2.StringRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.configure_provider = channel.unary_unary( '/connpy.AIService/configure_provider', - request_serializer=connpy__pb2.ProviderRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.ProviderRequest.SerializeToString, 
+ response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + _registered_method=True) + self.configure_mcp = channel.unary_unary( + '/connpy.AIService/configure_mcp', + request_serializer=connpy_dot_proto_dot_connpy__pb2.MCPRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.load_session_data = channel.unary_unary( '/connpy.AIService/load_session_data', - request_serializer=connpy__pb2.StringRequest.SerializeToString, - response_deserializer=connpy__pb2.StructResponse.FromString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, _registered_method=True)

      Missing associated documentation comment in .proto file.

      @@ -1080,7 +1272,7 @@ def load_session_data(request, target, '/connpy.ConfigService/get_settings', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - connpy__pb2.StructResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, options, channel_credentials, insecure, @@ -1107,7 +1299,7 @@ def load_session_data(request, target, '/connpy.ConfigService/get_default_dir', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - connpy__pb2.StringResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.StringResponse.FromString, options, channel_credentials, insecure, @@ -1133,7 +1325,7 @@ def load_session_data(request, request, target, '/connpy.ConfigService/set_config_folder', - connpy__pb2.StringRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -1160,7 +1352,7 @@ def load_session_data(request, request, target, '/connpy.ConfigService/update_setting', - connpy__pb2.UpdateRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.UpdateRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -1187,8 +1379,8 @@ def load_session_data(request, request, target, '/connpy.ConfigService/encrypt_password', - connpy__pb2.StringRequest.SerializeToString, - connpy__pb2.StringResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.StringResponse.FromString, options, channel_credentials, insecure, @@ -1214,8 +1406,8 @@ def load_session_data(request, request, target, '/connpy.ConfigService/apply_theme_from_file', - connpy__pb2.StringRequest.SerializeToString, - connpy__pb2.StructResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, options, channel_credentials, insecure, @@ -1252,8 +1444,8 @@ 
def apply_theme_from_file(request, request, target, '/connpy.ConfigService/apply_theme_from_file', - connpy__pb2.StringRequest.SerializeToString, - connpy__pb2.StructResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, options, channel_credentials, insecure, @@ -1289,8 +1481,8 @@ def encrypt_password(request, request, target, '/connpy.ConfigService/encrypt_password', - connpy__pb2.StringRequest.SerializeToString, - connpy__pb2.StringResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.StringResponse.FromString, options, channel_credentials, insecure, @@ -1327,7 +1519,7 @@ def get_default_dir(request, target, '/connpy.ConfigService/get_default_dir', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - connpy__pb2.StringResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.StringResponse.FromString, options, channel_credentials, insecure, @@ -1364,7 +1556,7 @@ def get_settings(request, target, '/connpy.ConfigService/get_settings', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - connpy__pb2.StructResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, options, channel_credentials, insecure, @@ -1400,7 +1592,7 @@ def set_config_folder(request, request, target, '/connpy.ConfigService/set_config_folder', - connpy__pb2.StringRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -1437,7 +1629,7 @@ def update_setting(request, request, target, '/connpy.ConfigService/update_setting', - connpy__pb2.UpdateRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.UpdateRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -1626,32 +1818,32 @@ def update_setting(request, 
self.get_settings = channel.unary_unary( '/connpy.ConfigService/get_settings', request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=connpy__pb2.StructResponse.FromString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, _registered_method=True) self.get_default_dir = channel.unary_unary( '/connpy.ConfigService/get_default_dir', request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=connpy__pb2.StringResponse.FromString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.StringResponse.FromString, _registered_method=True) self.set_config_folder = channel.unary_unary( '/connpy.ConfigService/set_config_folder', - request_serializer=connpy__pb2.StringRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.update_setting = channel.unary_unary( '/connpy.ConfigService/update_setting', - request_serializer=connpy__pb2.UpdateRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.UpdateRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.encrypt_password = channel.unary_unary( '/connpy.ConfigService/encrypt_password', - request_serializer=connpy__pb2.StringRequest.SerializeToString, - response_deserializer=connpy__pb2.StringResponse.FromString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.StringResponse.FromString, _registered_method=True) self.apply_theme_from_file = channel.unary_unary( '/connpy.ConfigService/apply_theme_from_file', - request_serializer=connpy__pb2.StringRequest.SerializeToString, - response_deserializer=connpy__pb2.StructResponse.FromString, + 
request_serializer=connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, _registered_method=True)

      Missing associated documentation comment in .proto file.

      @@ -1688,8 +1880,8 @@ def update_setting(request, request, target, '/connpy.ExecutionService/run_commands', - connpy__pb2.RunRequest.SerializeToString, - connpy__pb2.NodeRunResult.FromString, + connpy_dot_proto_dot_connpy__pb2.RunRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.NodeRunResult.FromString, options, channel_credentials, insecure, @@ -1715,8 +1907,8 @@ def update_setting(request, request, target, '/connpy.ExecutionService/test_commands', - connpy__pb2.TestRequest.SerializeToString, - connpy__pb2.NodeRunResult.FromString, + connpy_dot_proto_dot_connpy__pb2.TestRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.NodeRunResult.FromString, options, channel_credentials, insecure, @@ -1742,8 +1934,8 @@ def update_setting(request, request, target, '/connpy.ExecutionService/run_cli_script', - connpy__pb2.ScriptRequest.SerializeToString, - connpy__pb2.StructResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.ScriptRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, options, channel_credentials, insecure, @@ -1769,8 +1961,8 @@ def update_setting(request, request, target, '/connpy.ExecutionService/run_yaml_playbook', - connpy__pb2.ScriptRequest.SerializeToString, - connpy__pb2.StructResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.ScriptRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, options, channel_credentials, insecure, @@ -1807,8 +1999,8 @@ def run_cli_script(request, request, target, '/connpy.ExecutionService/run_cli_script', - connpy__pb2.ScriptRequest.SerializeToString, - connpy__pb2.StructResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.ScriptRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, options, channel_credentials, insecure, @@ -1844,8 +2036,8 @@ def run_commands(request, request, target, '/connpy.ExecutionService/run_commands', - connpy__pb2.RunRequest.SerializeToString, - 
connpy__pb2.NodeRunResult.FromString, + connpy_dot_proto_dot_connpy__pb2.RunRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.NodeRunResult.FromString, options, channel_credentials, insecure, @@ -1881,8 +2073,8 @@ def run_yaml_playbook(request, request, target, '/connpy.ExecutionService/run_yaml_playbook', - connpy__pb2.ScriptRequest.SerializeToString, - connpy__pb2.StructResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.ScriptRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, options, channel_credentials, insecure, @@ -1918,8 +2110,8 @@ def test_commands(request, request, target, '/connpy.ExecutionService/test_commands', - connpy__pb2.TestRequest.SerializeToString, - connpy__pb2.NodeRunResult.FromString, + connpy_dot_proto_dot_connpy__pb2.TestRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.NodeRunResult.FromString, options, channel_credentials, insecure, @@ -2062,23 +2254,23 @@ def test_commands(request, """ self.run_commands = channel.unary_stream( '/connpy.ExecutionService/run_commands', - request_serializer=connpy__pb2.RunRequest.SerializeToString, - response_deserializer=connpy__pb2.NodeRunResult.FromString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.RunRequest.SerializeToString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.NodeRunResult.FromString, _registered_method=True) self.test_commands = channel.unary_stream( '/connpy.ExecutionService/test_commands', - request_serializer=connpy__pb2.TestRequest.SerializeToString, - response_deserializer=connpy__pb2.NodeRunResult.FromString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.TestRequest.SerializeToString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.NodeRunResult.FromString, _registered_method=True) self.run_cli_script = channel.unary_unary( '/connpy.ExecutionService/run_cli_script', - request_serializer=connpy__pb2.ScriptRequest.SerializeToString, - 
response_deserializer=connpy__pb2.StructResponse.FromString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.ScriptRequest.SerializeToString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, _registered_method=True) self.run_yaml_playbook = channel.unary_unary( '/connpy.ExecutionService/run_yaml_playbook', - request_serializer=connpy__pb2.ScriptRequest.SerializeToString, - response_deserializer=connpy__pb2.StructResponse.FromString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.ScriptRequest.SerializeToString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, _registered_method=True)

      Missing associated documentation comment in .proto file.

      @@ -2115,7 +2307,7 @@ def test_commands(request, request, target, '/connpy.ImportExportService/export_to_file', - connpy__pb2.ExportRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.ExportRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -2142,7 +2334,7 @@ def test_commands(request, request, target, '/connpy.ImportExportService/import_from_file', - connpy__pb2.StringRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -2169,7 +2361,7 @@ def test_commands(request, request, target, '/connpy.ImportExportService/set_reserved_names', - connpy__pb2.ListRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.ListRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -2207,7 +2399,7 @@ def export_to_file(request, request, target, '/connpy.ImportExportService/export_to_file', - connpy__pb2.ExportRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.ExportRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -2244,7 +2436,7 @@ def import_from_file(request, request, target, '/connpy.ImportExportService/import_from_file', - connpy__pb2.StringRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -2281,7 +2473,7 @@ def set_reserved_names(request, request, target, '/connpy.ImportExportService/set_reserved_names', - connpy__pb2.ListRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.ListRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -2403,17 +2595,17 @@ def set_reserved_names(request, """ self.export_to_file = channel.unary_unary( 
'/connpy.ImportExportService/export_to_file', - request_serializer=connpy__pb2.ExportRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.ExportRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.import_from_file = channel.unary_unary( '/connpy.ImportExportService/import_from_file', - request_serializer=connpy__pb2.StringRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.StringRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.set_reserved_names = channel.unary_unary( '/connpy.ImportExportService/set_reserved_names', - request_serializer=connpy__pb2.ListRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.ListRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) @@ -2451,8 +2643,8 @@ def set_reserved_names(request, request, target, '/connpy.NodeService/list_nodes', - connpy__pb2.FilterRequest.SerializeToString, - connpy__pb2.ValueResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.FilterRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.ValueResponse.FromString, options, channel_credentials, insecure, @@ -2478,8 +2670,8 @@ def set_reserved_names(request, request, target, '/connpy.NodeService/list_folders', - connpy__pb2.FilterRequest.SerializeToString, - connpy__pb2.ValueResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.FilterRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.ValueResponse.FromString, options, channel_credentials, insecure, @@ -2505,8 +2697,8 @@ def set_reserved_names(request, request, target, '/connpy.NodeService/get_node_details', - connpy__pb2.IdRequest.SerializeToString, - connpy__pb2.StructResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, + 
connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, options, channel_credentials, insecure, @@ -2532,8 +2724,8 @@ def set_reserved_names(request, request, target, '/connpy.NodeService/explode_unique', - connpy__pb2.IdRequest.SerializeToString, - connpy__pb2.ValueResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.ValueResponse.FromString, options, channel_credentials, insecure, @@ -2586,7 +2778,7 @@ def set_reserved_names(request, request, target, '/connpy.NodeService/add_node', - connpy__pb2.NodeRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.NodeRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -2613,7 +2805,7 @@ def set_reserved_names(request, request, target, '/connpy.NodeService/update_node', - connpy__pb2.NodeRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.NodeRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -2640,7 +2832,7 @@ def set_reserved_names(request, request, target, '/connpy.NodeService/delete_node', - connpy__pb2.DeleteRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.DeleteRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -2667,7 +2859,7 @@ def set_reserved_names(request, request, target, '/connpy.NodeService/move_node', - connpy__pb2.MoveRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.MoveRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -2694,7 +2886,7 @@ def set_reserved_names(request, request, target, '/connpy.NodeService/bulk_add', - connpy__pb2.BulkRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.BulkRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -2721,7 +2913,7 @@ def 
set_reserved_names(request, request, target, '/connpy.NodeService/validate_parent_folder', - connpy__pb2.IdRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -2748,7 +2940,7 @@ def set_reserved_names(request, request, target, '/connpy.NodeService/set_reserved_names', - connpy__pb2.ListRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.ListRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -2775,8 +2967,8 @@ def set_reserved_names(request, request_iterator, target, '/connpy.NodeService/interact_node', - connpy__pb2.InteractRequest.SerializeToString, - connpy__pb2.InteractResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.InteractRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.InteractResponse.FromString, options, channel_credentials, insecure, @@ -2802,7 +2994,7 @@ def set_reserved_names(request, request, target, '/connpy.NodeService/full_replace', - connpy__pb2.FullReplaceRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.FullReplaceRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -2830,7 +3022,7 @@ def set_reserved_names(request, target, '/connpy.NodeService/get_inventory', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - connpy__pb2.FullReplaceRequest.FromString, + connpy_dot_proto_dot_connpy__pb2.FullReplaceRequest.FromString, options, channel_credentials, insecure, @@ -2867,7 +3059,7 @@ def add_node(request, request, target, '/connpy.NodeService/add_node', - connpy__pb2.NodeRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.NodeRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -2904,7 +3096,7 @@ def bulk_add(request, request, target, '/connpy.NodeService/bulk_add', - 
connpy__pb2.BulkRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.BulkRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -2941,7 +3133,7 @@ def delete_node(request, request, target, '/connpy.NodeService/delete_node', - connpy__pb2.DeleteRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.DeleteRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -2978,8 +3170,8 @@ def explode_unique(request, request, target, '/connpy.NodeService/explode_unique', - connpy__pb2.IdRequest.SerializeToString, - connpy__pb2.ValueResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.ValueResponse.FromString, options, channel_credentials, insecure, @@ -3015,7 +3207,7 @@ def full_replace(request, request, target, '/connpy.NodeService/full_replace', - connpy__pb2.FullReplaceRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.FullReplaceRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -3090,7 +3282,7 @@ def get_inventory(request, target, '/connpy.NodeService/get_inventory', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - connpy__pb2.FullReplaceRequest.FromString, + connpy_dot_proto_dot_connpy__pb2.FullReplaceRequest.FromString, options, channel_credentials, insecure, @@ -3126,8 +3318,8 @@ def get_node_details(request, request, target, '/connpy.NodeService/get_node_details', - connpy__pb2.IdRequest.SerializeToString, - connpy__pb2.StructResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, options, channel_credentials, insecure, @@ -3163,8 +3355,8 @@ def interact_node(request_iterator, request_iterator, target, '/connpy.NodeService/interact_node', - connpy__pb2.InteractRequest.SerializeToString, - 
connpy__pb2.InteractResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.InteractRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.InteractResponse.FromString, options, channel_credentials, insecure, @@ -3200,8 +3392,8 @@ def list_folders(request, request, target, '/connpy.NodeService/list_folders', - connpy__pb2.FilterRequest.SerializeToString, - connpy__pb2.ValueResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.FilterRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.ValueResponse.FromString, options, channel_credentials, insecure, @@ -3237,8 +3429,8 @@ def list_nodes(request, request, target, '/connpy.NodeService/list_nodes', - connpy__pb2.FilterRequest.SerializeToString, - connpy__pb2.ValueResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.FilterRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.ValueResponse.FromString, options, channel_credentials, insecure, @@ -3274,7 +3466,7 @@ def move_node(request, request, target, '/connpy.NodeService/move_node', - connpy__pb2.MoveRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.MoveRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -3311,7 +3503,7 @@ def set_reserved_names(request, request, target, '/connpy.NodeService/set_reserved_names', - connpy__pb2.ListRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.ListRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -3348,7 +3540,7 @@ def update_node(request, request, target, '/connpy.NodeService/update_node', - connpy__pb2.NodeRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.NodeRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -3385,7 +3577,7 @@ def validate_parent_folder(request, request, target, '/connpy.NodeService/validate_parent_folder', - connpy__pb2.IdRequest.SerializeToString, + 
connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -3771,23 +3963,23 @@ def validate_parent_folder(request, """ self.list_nodes = channel.unary_unary( '/connpy.NodeService/list_nodes', - request_serializer=connpy__pb2.FilterRequest.SerializeToString, - response_deserializer=connpy__pb2.ValueResponse.FromString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.FilterRequest.SerializeToString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.ValueResponse.FromString, _registered_method=True) self.list_folders = channel.unary_unary( '/connpy.NodeService/list_folders', - request_serializer=connpy__pb2.FilterRequest.SerializeToString, - response_deserializer=connpy__pb2.ValueResponse.FromString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.FilterRequest.SerializeToString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.ValueResponse.FromString, _registered_method=True) self.get_node_details = channel.unary_unary( '/connpy.NodeService/get_node_details', - request_serializer=connpy__pb2.IdRequest.SerializeToString, - response_deserializer=connpy__pb2.StructResponse.FromString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, _registered_method=True) self.explode_unique = channel.unary_unary( '/connpy.NodeService/explode_unique', - request_serializer=connpy__pb2.IdRequest.SerializeToString, - response_deserializer=connpy__pb2.ValueResponse.FromString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.ValueResponse.FromString, _registered_method=True) self.generate_cache = channel.unary_unary( '/connpy.NodeService/generate_cache', @@ -3796,53 +3988,53 @@ def validate_parent_folder(request, _registered_method=True) self.add_node = 
channel.unary_unary( '/connpy.NodeService/add_node', - request_serializer=connpy__pb2.NodeRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.NodeRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.update_node = channel.unary_unary( '/connpy.NodeService/update_node', - request_serializer=connpy__pb2.NodeRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.NodeRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.delete_node = channel.unary_unary( '/connpy.NodeService/delete_node', - request_serializer=connpy__pb2.DeleteRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.DeleteRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.move_node = channel.unary_unary( '/connpy.NodeService/move_node', - request_serializer=connpy__pb2.MoveRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.MoveRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.bulk_add = channel.unary_unary( '/connpy.NodeService/bulk_add', - request_serializer=connpy__pb2.BulkRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.BulkRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.validate_parent_folder = channel.unary_unary( '/connpy.NodeService/validate_parent_folder', - request_serializer=connpy__pb2.IdRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.set_reserved_names = channel.unary_unary( 
'/connpy.NodeService/set_reserved_names', - request_serializer=connpy__pb2.ListRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.ListRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.interact_node = channel.stream_stream( '/connpy.NodeService/interact_node', - request_serializer=connpy__pb2.InteractRequest.SerializeToString, - response_deserializer=connpy__pb2.InteractResponse.FromString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.InteractRequest.SerializeToString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.InteractResponse.FromString, _registered_method=True) self.full_replace = channel.unary_unary( '/connpy.NodeService/full_replace', - request_serializer=connpy__pb2.FullReplaceRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.FullReplaceRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.get_inventory = channel.unary_unary( '/connpy.NodeService/get_inventory', request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=connpy__pb2.FullReplaceRequest.FromString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.FullReplaceRequest.FromString, _registered_method=True)

      Missing associated documentation comment in .proto file.

      @@ -3880,7 +4072,7 @@ def validate_parent_folder(request, target, '/connpy.PluginService/list_plugins', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - connpy__pb2.ValueResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.ValueResponse.FromString, options, channel_credentials, insecure, @@ -3906,7 +4098,7 @@ def validate_parent_folder(request, request, target, '/connpy.PluginService/add_plugin', - connpy__pb2.PluginRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.PluginRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -3933,7 +4125,7 @@ def validate_parent_folder(request, request, target, '/connpy.PluginService/delete_plugin', - connpy__pb2.IdRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -3960,7 +4152,7 @@ def validate_parent_folder(request, request, target, '/connpy.PluginService/enable_plugin', - connpy__pb2.IdRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -3987,7 +4179,7 @@ def validate_parent_folder(request, request, target, '/connpy.PluginService/disable_plugin', - connpy__pb2.IdRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -4025,7 +4217,7 @@ def add_plugin(request, request, target, '/connpy.PluginService/add_plugin', - connpy__pb2.PluginRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.PluginRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -4062,7 +4254,7 @@ def delete_plugin(request, request, target, '/connpy.PluginService/delete_plugin', - connpy__pb2.IdRequest.SerializeToString, + 
connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -4099,7 +4291,7 @@ def disable_plugin(request, request, target, '/connpy.PluginService/disable_plugin', - connpy__pb2.IdRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -4136,7 +4328,7 @@ def enable_plugin(request, request, target, '/connpy.PluginService/enable_plugin', - connpy__pb2.IdRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -4174,7 +4366,7 @@ def list_plugins(request, target, '/connpy.PluginService/list_plugins', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - connpy__pb2.ValueResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.ValueResponse.FromString, options, channel_credentials, insecure, @@ -4340,26 +4532,26 @@ def list_plugins(request, self.list_plugins = channel.unary_unary( '/connpy.PluginService/list_plugins', request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=connpy__pb2.ValueResponse.FromString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.ValueResponse.FromString, _registered_method=True) self.add_plugin = channel.unary_unary( '/connpy.PluginService/add_plugin', - request_serializer=connpy__pb2.PluginRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.PluginRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.delete_plugin = channel.unary_unary( '/connpy.PluginService/delete_plugin', - request_serializer=connpy__pb2.IdRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, 
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.enable_plugin = channel.unary_unary( '/connpy.PluginService/enable_plugin', - request_serializer=connpy__pb2.IdRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.disable_plugin = channel.unary_unary( '/connpy.PluginService/disable_plugin', - request_serializer=connpy__pb2.IdRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) @@ -4397,8 +4589,8 @@ def list_plugins(request, request, target, '/connpy.ProfileService/list_profiles', - connpy__pb2.FilterRequest.SerializeToString, - connpy__pb2.ValueResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.FilterRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.ValueResponse.FromString, options, channel_credentials, insecure, @@ -4424,8 +4616,8 @@ def list_plugins(request, request, target, '/connpy.ProfileService/get_profile', - connpy__pb2.ProfileRequest.SerializeToString, - connpy__pb2.StructResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.ProfileRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, options, channel_credentials, insecure, @@ -4451,7 +4643,7 @@ def list_plugins(request, request, target, '/connpy.ProfileService/add_profile', - connpy__pb2.NodeRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.NodeRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -4478,8 +4670,8 @@ def list_plugins(request, request, target, '/connpy.ProfileService/resolve_node_data', - connpy__pb2.StructRequest.SerializeToString, - connpy__pb2.StructResponse.FromString, + 
connpy_dot_proto_dot_connpy__pb2.StructRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, options, channel_credentials, insecure, @@ -4505,7 +4697,7 @@ def list_plugins(request, request, target, '/connpy.ProfileService/delete_profile', - connpy__pb2.IdRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -4532,7 +4724,7 @@ def list_plugins(request, request, target, '/connpy.ProfileService/update_profile', - connpy__pb2.NodeRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.NodeRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -4570,7 +4762,7 @@ def add_profile(request, request, target, '/connpy.ProfileService/add_profile', - connpy__pb2.NodeRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.NodeRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -4607,7 +4799,7 @@ def delete_profile(request, request, target, '/connpy.ProfileService/delete_profile', - connpy__pb2.IdRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -4644,8 +4836,8 @@ def get_profile(request, request, target, '/connpy.ProfileService/get_profile', - connpy__pb2.ProfileRequest.SerializeToString, - connpy__pb2.StructResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.ProfileRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, options, channel_credentials, insecure, @@ -4681,8 +4873,8 @@ def list_profiles(request, request, target, '/connpy.ProfileService/list_profiles', - connpy__pb2.FilterRequest.SerializeToString, - connpy__pb2.ValueResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.FilterRequest.SerializeToString, + 
connpy_dot_proto_dot_connpy__pb2.ValueResponse.FromString, options, channel_credentials, insecure, @@ -4718,8 +4910,8 @@ def resolve_node_data(request, request, target, '/connpy.ProfileService/resolve_node_data', - connpy__pb2.StructRequest.SerializeToString, - connpy__pb2.StructResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.StructRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, options, channel_credentials, insecure, @@ -4755,7 +4947,7 @@ def update_profile(request, request, target, '/connpy.ProfileService/update_profile', - connpy__pb2.NodeRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.NodeRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -4943,32 +5135,32 @@ def update_profile(request, """ self.list_profiles = channel.unary_unary( '/connpy.ProfileService/list_profiles', - request_serializer=connpy__pb2.FilterRequest.SerializeToString, - response_deserializer=connpy__pb2.ValueResponse.FromString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.FilterRequest.SerializeToString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.ValueResponse.FromString, _registered_method=True) self.get_profile = channel.unary_unary( '/connpy.ProfileService/get_profile', - request_serializer=connpy__pb2.ProfileRequest.SerializeToString, - response_deserializer=connpy__pb2.StructResponse.FromString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.ProfileRequest.SerializeToString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, _registered_method=True) self.add_profile = channel.unary_unary( '/connpy.ProfileService/add_profile', - request_serializer=connpy__pb2.NodeRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.NodeRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.resolve_node_data = 
channel.unary_unary( '/connpy.ProfileService/resolve_node_data', - request_serializer=connpy__pb2.StructRequest.SerializeToString, - response_deserializer=connpy__pb2.StructResponse.FromString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.StructRequest.SerializeToString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.StructResponse.FromString, _registered_method=True) self.delete_profile = channel.unary_unary( '/connpy.ProfileService/delete_profile', - request_serializer=connpy__pb2.IdRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.IdRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.update_profile = channel.unary_unary( '/connpy.ProfileService/update_profile', - request_serializer=connpy__pb2.NodeRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.NodeRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) @@ -5006,7 +5198,7 @@ def update_profile(request, request, target, '/connpy.SystemService/start_api', - connpy__pb2.IntRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.IntRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -5033,7 +5225,7 @@ def update_profile(request, request, target, '/connpy.SystemService/debug_api', - connpy__pb2.IntRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.IntRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -5087,7 +5279,7 @@ def update_profile(request, request, target, '/connpy.SystemService/restart_api', - connpy__pb2.IntRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.IntRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -5115,7 +5307,7 @@ def update_profile(request, target, 
'/connpy.SystemService/get_api_status', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - connpy__pb2.BoolResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.BoolResponse.FromString, options, channel_credentials, insecure, @@ -5152,7 +5344,7 @@ def debug_api(request, request, target, '/connpy.SystemService/debug_api', - connpy__pb2.IntRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.IntRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -5190,7 +5382,7 @@ def get_api_status(request, target, '/connpy.SystemService/get_api_status', google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - connpy__pb2.BoolResponse.FromString, + connpy_dot_proto_dot_connpy__pb2.BoolResponse.FromString, options, channel_credentials, insecure, @@ -5226,7 +5418,7 @@ def restart_api(request, request, target, '/connpy.SystemService/restart_api', - connpy__pb2.IntRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.IntRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -5263,7 +5455,7 @@ def start_api(request, request, target, '/connpy.SystemService/start_api', - connpy__pb2.IntRequest.SerializeToString, + connpy_dot_proto_dot_connpy__pb2.IntRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, @@ -5466,12 +5658,12 @@ def stop_api(request, """ self.start_api = channel.unary_unary( '/connpy.SystemService/start_api', - request_serializer=connpy__pb2.IntRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.IntRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.debug_api = channel.unary_unary( '/connpy.SystemService/debug_api', - request_serializer=connpy__pb2.IntRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.IntRequest.SerializeToString, 
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.stop_api = channel.unary_unary( @@ -5481,13 +5673,13 @@ def stop_api(request, _registered_method=True) self.restart_api = channel.unary_unary( '/connpy.SystemService/restart_api', - request_serializer=connpy__pb2.IntRequest.SerializeToString, + request_serializer=connpy_dot_proto_dot_connpy__pb2.IntRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, _registered_method=True) self.get_api_status = channel.unary_unary( '/connpy.SystemService/get_api_status', request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - response_deserializer=connpy__pb2.BoolResponse.FromString, + response_deserializer=connpy_dot_proto_dot_connpy__pb2.BoolResponse.FromString, _registered_method=True)

      Missing associated documentation comment in .proto file.

      @@ -5529,6 +5721,8 @@ def stop_api(request,

      AIService

      • ask
      • +
      • ask_copilot
      • +
      • configure_mcp
      • configure_provider
      • confirm
      • delete_session
      • @@ -5540,6 +5734,8 @@ def stop_api(request,

        AIServiceServicer

        • ask
        • +
        • ask_copilot
        • +
        • configure_mcp
        • configure_provider
        • confirm
        • delete_session
        • @@ -5735,7 +5931,7 @@ def stop_api(request, diff --git a/docs/connpy/grpc_layer/index.html b/docs/connpy/grpc_layer/index.html index ad05c93..83e1647 100644 --- a/docs/connpy/grpc_layer/index.html +++ b/docs/connpy/grpc_layer/index.html @@ -3,7 +3,7 @@ - + connpy.grpc_layer API documentation @@ -102,7 +102,7 @@ el.replaceWith(d); diff --git a/docs/connpy/grpc_layer/remote_plugin_pb2.html b/docs/connpy/grpc_layer/remote_plugin_pb2.html index c841aa0..6e7bb97 100644 --- a/docs/connpy/grpc_layer/remote_plugin_pb2.html +++ b/docs/connpy/grpc_layer/remote_plugin_pb2.html @@ -3,7 +3,7 @@ - + connpy.grpc_layer.remote_plugin_pb2 API documentation @@ -62,7 +62,7 @@ el.replaceWith(d);
          var DESCRIPTOR
          -

          The type of the None singleton.

          +
          @@ -81,7 +81,7 @@ el.replaceWith(d);
          var DESCRIPTOR
          -

          The type of the None singleton.

          +
          @@ -100,7 +100,7 @@ el.replaceWith(d);
          var DESCRIPTOR
          -

          The type of the None singleton.

          +
          @@ -119,7 +119,7 @@ el.replaceWith(d);
          var DESCRIPTOR
          -

          The type of the None singleton.

          +
          @@ -168,7 +168,7 @@ el.replaceWith(d); diff --git a/docs/connpy/grpc_layer/remote_plugin_pb2_grpc.html b/docs/connpy/grpc_layer/remote_plugin_pb2_grpc.html index 61ed251..6372fcd 100644 --- a/docs/connpy/grpc_layer/remote_plugin_pb2_grpc.html +++ b/docs/connpy/grpc_layer/remote_plugin_pb2_grpc.html @@ -3,7 +3,7 @@ - + connpy.grpc_layer.remote_plugin_pb2_grpc API documentation @@ -366,7 +366,7 @@ def invoke_plugin(request, diff --git a/docs/connpy/grpc_layer/server.html b/docs/connpy/grpc_layer/server.html index 05a9d91..2a4472a 100644 --- a/docs/connpy/grpc_layer/server.html +++ b/docs/connpy/grpc_layer/server.html @@ -3,7 +3,7 @@ - + connpy.grpc_layer.server API documentation @@ -96,7 +96,7 @@ el.replaceWith(d); interceptors = [LoggingInterceptor()] if debug else [] server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), interceptors=interceptors) - connpy_pb2_grpc.add_NodeServiceServicer_to_server(NodeServicer(config), server) + connpy_pb2_grpc.add_NodeServiceServicer_to_server(NodeServicer(config, debug=debug), server) connpy_pb2_grpc.add_ProfileServiceServicer_to_server(ProfileServicer(config), server) connpy_pb2_grpc.add_ConfigServiceServicer_to_server(ConfigServicer(config), server) plugin_servicer = PluginServicer(config) @@ -245,6 +245,22 @@ el.replaceWith(d); res = self.service.confirm(request.value) return connpy_pb2.BoolResponse(value=res) + @handle_errors + def ask_copilot(self, request, context): + import json + node_info = json.loads(request.node_info_json) if request.node_info_json else None + result = self.service.ask_copilot( + request.terminal_buffer, + request.user_question, + node_info + ) + return connpy_pb2.CopilotResponse( + commands=result.get("commands", []), + guide=result.get("guide", ""), + risk_level=result.get("risk_level", "low"), + error=result.get("error") or "" + ) + @handle_errors def list_sessions(self, request, context): return connpy_pb2.ValueResponse(data=to_value(self.service.list_sessions())) @@ -258,6 
+274,17 @@ el.replaceWith(d); def configure_provider(self, request, context): self.service.configure_provider(request.provider, request.model, request.api_key) return Empty() + + @handle_errors + def configure_mcp(self, request, context): + self.service.configure_mcp( + request.name, + url=request.url or None, + enabled=request.enabled, + auto_load_on_os=request.auto_load_on_os or None, + remove=request.remove + ) + return Empty() @handle_errors def load_session_data(self, request, context): @@ -273,6 +300,8 @@ el.replaceWith(d);
        • AIServiceServicer:
      class NodeServicer -(config) +(config, debug=False)
      @@ -626,8 +655,13 @@ interceptor chooses to service this RPC, or None otherwise.

      Expand source code
      class NodeServicer(connpy_pb2_grpc.NodeServiceServicer):
      -    def __init__(self, config):
      +    def __init__(self, config, debug=False):
               self.service = NodeService(config)
      +        self.server_debug = debug
      +        if debug:
      +            from rich.console import Console
      +            from ..printer import connpy_theme, get_original_stdout
      +            self.server_console = Console(theme=connpy_theme, file=get_original_stdout())
       
           @handle_errors
           def interact_node(self, request_iterator, context):
      @@ -650,8 +684,8 @@ interceptor chooses to service this RPC, or None otherwise.

      sftp = first_req.sftp debug = first_req.debug - if debug: - printer.console.print(f"[debug][DEBUG][/debug] gRPC interact_node request for: [bold cyan]{unique_id}[/bold cyan]") + if self.server_debug: + self.server_console.print(f"[debug][DEBUG][/debug] gRPC interact_node request for: [bold cyan]{unique_id}[/bold cyan]") if first_req.connection_params_json: import json @@ -710,7 +744,39 @@ interceptor chooses to service this RPC, or None otherwise.

      if sftp: n.protocol = "sftp" - connect = n._connect(debug=debug) + # Build a logger that captures debug messages as ANSI-colored bytes for the client + debug_chunks = [] + if debug: + from io import StringIO + from rich.console import Console as RichConsole + from ..printer import connpy_theme + from .. import printer as _printer + + def remote_logger(msg_type, message): + buf = StringIO() + c = RichConsole(file=buf, force_terminal=True, width=120, theme=connpy_theme) + if msg_type == "debug": + c.print(_printer._format_multiline("i", f"[DEBUG] {message}", style="info")) + elif msg_type == "success": + c.print(_printer._format_multiline("βœ“", message, style="success")) + elif msg_type == "error": + c.print(_printer._format_multiline("βœ—", message, style="error")) + else: + c.print(str(message)) + rendered = buf.getvalue() + if rendered: + # Raw TTY needs \r\n instead of \n + rendered = rendered.replace('\n', '\r\n') + debug_chunks.append(rendered.encode()) + else: + remote_logger = None + + connect = n._connect(debug=debug, logger=remote_logger) + + # Send debug output to client before checking result (always show the command) + for chunk in debug_chunks: + yield connpy_pb2.InteractResponse(stdout_data=chunk) + if connect != True: yield connpy_pb2.InteractResponse(success=False, error_message=str(connect)) return @@ -737,7 +803,160 @@ interceptor chooses to service this RPC, or None otherwise.

      except Exception: pass - asyncio.run(n._async_interact_loop(remote_stream, resize_callback)) + async def remote_copilot_handler(buffer, node_info, stream, child_fd, cmd_byte_positions=None): + import json + import asyncio + import os + + if node_info is None: + node_info = {} + + node_info_json = json.dumps(node_info) + + # Convert buffer to string if it's bytes for the preview + preview_str = buffer[-200:].decode(errors='replace') if isinstance(buffer, bytes) else str(buffer)[-200:] + + # 1. Send prompt to client + response_queue.put(connpy_pb2.InteractResponse( + copilot_prompt=True, + copilot_buffer_preview=preview_str, + copilot_node_info_json=node_info_json + )) + + while True: + # 2. Await the question from client via the copilot_queue + import threading + def preload_ai_deps(): + try: + import litellm + except Exception: + pass + threading.Thread(target=preload_ai_deps, daemon=True).start() + + try: + req_data = await asyncio.wait_for(remote_stream.copilot_queue.get(), timeout=120) + if not req_data: return + if "question" not in req_data or not req_data["question"] or req_data["question"] == "CANCEL" or req_data.get("action") == "cancel": + os.write(child_fd, b'\x15\r') + return + question = req_data["question"] + + merged_node_info_str = req_data.get("node_info_json", "") + if merged_node_info_str: + try: + merged_node_info = json.loads(merged_node_info_str) + node_info.update(merged_node_info) + except: pass + + context_buffer = req_data.get("context_buffer", "") + if context_buffer.startswith('{"context_start_pos"'): + try: + parsed = json.loads(context_buffer) + start_pos = parsed["context_start_pos"] + selected_raw = raw_bytes[start_pos:] + context_buffer = n._logclean(selected_raw.decode(errors='replace'), var=True) + except Exception: + context_buffer = buffer + elif not context_buffer: + context_buffer = buffer + except asyncio.TimeoutError: + os.write(child_fd, b'\x15\r') + return + + # 3. 
Call AI Service with streaming + from ..services.ai_service import AIService + service = AIService(self.service.config) + + def chunk_callback(chunk_text): + if chunk_text: + response_queue.put(connpy_pb2.InteractResponse( + copilot_stream_chunk=chunk_text + )) + + # Create a clean version of node_info for the AI to save tokens and match local CLI behavior + ai_node_info = {k: v for k, v in node_info.items() if k not in ("context_blocks", "full_buffer")} + + ai_task = asyncio.create_task(service.aask_copilot(context_buffer, question, ai_node_info, chunk_callback=chunk_callback)) + wait_action_task = asyncio.create_task(remote_stream.copilot_queue.get()) + + done, pending = await asyncio.wait( + [ai_task, wait_action_task], + return_when=asyncio.FIRST_COMPLETED + ) + + if wait_action_task in done: + req_data = wait_action_task.result() + ai_task.cancel() + if req_data.get("action") == "cancel" or req_data.get("question") == "CANCEL": + os.write(child_fd, b'\x15\r') + return + continue # Loop back instead of returning to keep session alive + else: + wait_action_task.cancel() + result = ai_task.result() + if not result: + os.write(child_fd, b'\x15\r') + return + + # 4. Send response back to client + response_queue.put(connpy_pb2.InteractResponse( + copilot_response_json=json.dumps(result) + )) + + # 5. 
Wait for user action + try: + action_data = await asyncio.wait_for(remote_stream.copilot_queue.get(), timeout=60) + if not action_data: return + action = action_data.get("action", "cancel") + + if action == "continue": + continue # Loop back for next question + + if action == "cancel": + os.write(child_fd, b'\x15\r') + return + except asyncio.TimeoutError: + os.write(child_fd, b'\x15\r') + return + + if action == "send_all": + commands = result.get("commands", []) + os.write(child_fd, b'\x15') # Ctrl+U to clear line + await asyncio.sleep(0.1) + + # Prepend screen length command to avoid pagination + if "screen_length_command" in n.tags: + os.write(child_fd, (n.tags["screen_length_command"] + "\n").encode()) + response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=n.tags["screen_length_command"])) + await asyncio.sleep(0.8) + + for cmd in commands: + os.write(child_fd, (cmd + "\n").encode()) + response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=cmd)) + await asyncio.sleep(0.8) + return + elif action.startswith("custom:"): + custom_cmds = action[7:] + os.write(child_fd, b'\x15') + await asyncio.sleep(0.1) + + # Prepend screen length command to avoid pagination + if "screen_length_command" in n.tags: + os.write(child_fd, (n.tags["screen_length_command"] + "\n").encode()) + response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=n.tags["screen_length_command"])) + await asyncio.sleep(0.8) + + for cmd in custom_cmds.split('\n'): + if cmd.strip(): + os.write(child_fd, (cmd.strip() + "\n").encode()) + response_queue.put(connpy_pb2.InteractResponse(copilot_injected_command=cmd.strip())) + await asyncio.sleep(0.8) + return + else: + os.write(child_fd, b'\x15\r') + return + + asyncio.run(n._async_interact_loop(remote_stream, resize_callback, copilot_handler=remote_copilot_handler)) except Exception as e: pass finally: @@ -746,14 +965,19 @@ interceptor chooses to service this RPC, or None otherwise.

      t_loop = threading.Thread(target=run_async_loop, daemon=True) t_loop.start() + def response_generator(): + while True: + data = response_queue.get() + if data is None: + if self.server_debug: + self.server_console.print(f"[debug][DEBUG][/debug] gRPC interact_node session closed for: [bold cyan]{unique_id}[/bold cyan]") + break + if isinstance(data, connpy_pb2.InteractResponse): + yield data + else: + yield connpy_pb2.InteractResponse(stdout_data=data) + yield from response_generator() - while True: - data = response_queue.get() - if data is None: - if debug: - printer.console.print(f"[debug][DEBUG][/debug] gRPC interact_node session closed for: [bold cyan]{unique_id}[/bold cyan]") - break - yield connpy_pb2.InteractResponse(stdout_data=data) @handle_errors def list_nodes(self, request, context): f = request.filter_str if request.filter_str else None @@ -1319,7 +1543,7 @@ interceptor chooses to service this RPC, or None otherwise.

      diff --git a/docs/connpy/grpc_layer/stubs.html b/docs/connpy/grpc_layer/stubs.html index 836414e..21edc1b 100644 --- a/docs/connpy/grpc_layer/stubs.html +++ b/docs/connpy/grpc_layer/stubs.html @@ -3,7 +3,7 @@ - + connpy.grpc_layer.stubs API documentation @@ -200,21 +200,33 @@ el.replaceWith(d); if response.debug_message: if debug: + if live_display: + try: live_display.stop() + except: pass if status: try: status.stop() except: pass printer.console.print(Text.from_ansi(response.debug_message)) - if status: + if live_display: + try: live_display.start() + except: pass + elif status: try: status.start() except: pass continue if response.important_message: + if live_display: + try: live_display.stop() + except: pass if status: try: status.stop() except: pass printer.console.print(Text.from_ansi(response.important_message)) - if status: + if live_display: + try: live_display.start() + except: pass + elif status: try: status.start() except: pass continue @@ -223,14 +235,33 @@ el.replaceWith(d); if response.text_chunk: full_content += response.text_chunk - if status and not debug: - # Update the spinner line with a preview of the response - preview = full_content.replace("\n", " ").strip() - if len(preview) > 60: preview = preview[:57] + "..." 
- status.update(f"[ai_status]{preview}") + if not live_display: + if status: + try: status.stop() + except: pass + + from rich.console import Console as RichConsole + from ..printer import connpy_theme, get_original_stdout + stable_console = RichConsole(theme=connpy_theme, file=get_original_stdout()) + + # We default to Engineer title during stream, final result will correct it if needed + live_display = Live( + Panel(Markdown(full_content), title="[bold engineer]Network Engineer[/bold engineer]", border_style="engineer", expand=False), + console=stable_console, + refresh_per_second=8, + transient=False + ) + live_display.start() + else: + live_display.update( + Panel(Markdown(full_content), title="[bold engineer]Network Engineer[/bold engineer]", border_style="engineer", expand=False) + ) continue if response.is_final: + if live_display: + try: live_display.stop() + except: pass # Final stop for status to ensure it disappears before the panel if status: try: status.stop() @@ -242,10 +273,13 @@ el.replaceWith(d); role_label = "Network Architect" if responder == "architect" else "Network Engineer" title = f"[bold {alias}]{role_label}[/bold {alias}]" - # Always print the final Panel content_to_print = full_content or final_result.get("response", "") if content_to_print: - printer.console.print(Panel(Markdown(content_to_print), title=title, border_style=alias, expand=False)) + if live_display: + # Re-render the final frame with correct title/colors + live_display.update(Panel(Markdown(content_to_print), title=title, border_style=alias, expand=False)) + else: + printer.console.print(Panel(Markdown(content_to_print), title=title, border_style=alias, expand=False)) break except Exception as e: # Check if it was a gRPC error that we should let handle_errors catch @@ -277,6 +311,17 @@ el.replaceWith(d); req = connpy_pb2.ProviderRequest(provider=provider, model=model or "", api_key=api_key or "") self.stub.configure_provider(req) + @handle_errors + def configure_mcp(self, 
name, url=None, enabled=True, auto_load_on_os=None, remove=False): + req = connpy_pb2.MCPRequest( + name=name, + url=url or "", + enabled=enabled, + auto_load_on_os=auto_load_on_os or "", + remove=remove + ) + self.stub.configure_mcp(req) + @handle_errors def load_session_data(self, session_id): return from_struct(self.stub.load_session_data(connpy_pb2.StringRequest(value=session_id)).data)
      @@ -393,21 +438,33 @@ def ask(self, input_text, dryrun=False, chat_history=None, session_id=None, debu if response.debug_message: if debug: + if live_display: + try: live_display.stop() + except: pass if status: try: status.stop() except: pass printer.console.print(Text.from_ansi(response.debug_message)) - if status: + if live_display: + try: live_display.start() + except: pass + elif status: try: status.start() except: pass continue if response.important_message: + if live_display: + try: live_display.stop() + except: pass if status: try: status.stop() except: pass printer.console.print(Text.from_ansi(response.important_message)) - if status: + if live_display: + try: live_display.start() + except: pass + elif status: try: status.start() except: pass continue @@ -416,14 +473,33 @@ def ask(self, input_text, dryrun=False, chat_history=None, session_id=None, debu if response.text_chunk: full_content += response.text_chunk - if status and not debug: - # Update the spinner line with a preview of the response - preview = full_content.replace("\n", " ").strip() - if len(preview) > 60: preview = preview[:57] + "..." 
- status.update(f"[ai_status]{preview}") + if not live_display: + if status: + try: status.stop() + except: pass + + from rich.console import Console as RichConsole + from ..printer import connpy_theme, get_original_stdout + stable_console = RichConsole(theme=connpy_theme, file=get_original_stdout()) + + # We default to Engineer title during stream, final result will correct it if needed + live_display = Live( + Panel(Markdown(full_content), title="[bold engineer]Network Engineer[/bold engineer]", border_style="engineer", expand=False), + console=stable_console, + refresh_per_second=8, + transient=False + ) + live_display.start() + else: + live_display.update( + Panel(Markdown(full_content), title="[bold engineer]Network Engineer[/bold engineer]", border_style="engineer", expand=False) + ) continue if response.is_final: + if live_display: + try: live_display.stop() + except: pass # Final stop for status to ensure it disappears before the panel if status: try: status.stop() @@ -435,10 +511,13 @@ def ask(self, input_text, dryrun=False, chat_history=None, session_id=None, debu role_label = "Network Architect" if responder == "architect" else "Network Engineer" title = f"[bold {alias}]{role_label}[/bold {alias}]" - # Always print the final Panel content_to_print = full_content or final_result.get("response", "") if content_to_print: - printer.console.print(Panel(Markdown(content_to_print), title=title, border_style=alias, expand=False)) + if live_display: + # Re-render the final frame with correct title/colors + live_display.update(Panel(Markdown(content_to_print), title=title, border_style=alias, expand=False)) + else: + printer.console.print(Panel(Markdown(content_to_print), title=title, border_style=alias, expand=False)) break except Exception as e: # Check if it was a gRPC error that we should let handle_errors catch @@ -455,6 +534,27 @@ def ask(self, input_text, dryrun=False, chat_history=None, session_id=None, debu
      +
      +def configure_mcp(self, name, url=None, enabled=True, auto_load_on_os=None, remove=False) +
      +
      +
      + +Expand source code + +
      @handle_errors
      +def configure_mcp(self, name, url=None, enabled=True, auto_load_on_os=None, remove=False):
      +    req = connpy_pb2.MCPRequest(
      +        name=name,
      +        url=url or "",
      +        enabled=enabled,
      +        auto_load_on_os=auto_load_on_os or "",
      +        remove=remove
      +    )
      +    self.stub.configure_mcp(req)
      +
      +
      +
      def configure_provider(self, provider, model=None, api_key=None)
      @@ -924,15 +1024,98 @@ def set_reserved_names(self, names): self.remote_host = remote_host self.config = config + def _handle_remote_copilot(self, res, request_queue, response_queue, client_buffer_bytes, cmd_byte_positions, pause_generator, resume_generator, old_tty): + import json, asyncio, termios, sys, tty, queue + from ..core import copilot_terminal_mode + from . import connpy_pb2 + + pause_generator() + + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) + interface = CopilotInterface( + self.config, + history=getattr(self, 'copilot_history', None), + session_state=getattr(self, 'copilot_state', None) + ) + self.copilot_history = interface.history + self.copilot_state = interface.session_state + + node_info = json.loads(res.copilot_node_info_json) if res.copilot_node_info_json else {} + + async def on_ai_call_remote(active_buffer, question, chunk_callback, merged_node_info): + # Send request to server + request_queue.put(connpy_pb2.InteractRequest( + copilot_question=question, + copilot_context_buffer=active_buffer, + copilot_node_info_json=json.dumps(merged_node_info) + )) + # Wait for chunks from server + while True: + try: + chunk_res = response_queue.get(timeout=0.1) + if chunk_res is None: return {"error": "Server disconnected"} + if chunk_res.copilot_stream_chunk: + chunk_callback(chunk_res.copilot_stream_chunk) + elif chunk_res.copilot_response_json: + return json.loads(chunk_res.copilot_response_json) + except queue.Empty: + await asyncio.sleep(0.05) + + # Wrap in async loop + async def run_remote_copilot(): + while True: + action, commands, custom_cmd = await interface.run_session( + raw_bytes=bytes(client_buffer_bytes), + cmd_byte_positions=cmd_byte_positions, + node_info=node_info, + on_ai_call=on_ai_call_remote + ) + + if action == "continue": + # Send continue signal to server to loop back for another question + request_queue.put(connpy_pb2.InteractRequest(copilot_action="continue")) + continue + + return action, commands, custom_cmd 
+ + with copilot_terminal_mode(): + action, commands, custom_cmd = asyncio.run(run_remote_copilot()) + + # Prepare final action for server + action_sent = "cancel" + if action == "send_all" and commands: + # In remote mode, send the selected commands as a custom block + # so the server executes exactly what the user picked (e.g., selection '1') + action_sent = f"custom:{chr(10).join(commands)}" + elif action == "custom" and custom_cmd: + action_sent = f"custom:{chr(10).join(custom_cmd)}" + request_queue.put(connpy_pb2.InteractRequest(copilot_action=action_sent)) + resume_generator() + tty.setraw(sys.stdin.fileno()) + @handle_errors def connect_node(self, unique_id, sftp=False, debug=False, logger=None): import sys import select import tty import termios + import queue import os import threading + request_queue = queue.Queue() + client_buffer_bytes = bytearray() + cmd_byte_positions = [(0, None)] + pause_stdin = [False] + wake_r, wake_w = os.pipe() + + def pause_generator(): + pause_stdin[0] = True + os.write(wake_w, b'\x00') + + def resume_generator(): + pause_stdin[0] = False + def request_generator(): cols, rows = 80, 24 try: @@ -946,12 +1129,31 @@ def set_reserved_names(self, names): ) while True: - r, _, _ = select.select([sys.stdin.fileno()], [], []) - if r: + try: + while True: + req = request_queue.get_nowait() + if req is None: + return + yield req + except queue.Empty: + pass + + if pause_stdin[0]: + import time + time.sleep(0.05) + continue + + r, _, _ = select.select([sys.stdin.fileno(), wake_r], [], [], 0.05) + if wake_r in r: + os.read(wake_r, 1) + continue + if sys.stdin.fileno() in r and not pause_stdin[0]: try: data = os.read(sys.stdin.fileno(), 1024) if not data: break + if b'\r' in data or b'\n' in data: + cmd_byte_positions.append((len(client_buffer_bytes), None)) yield connpy_pb2.InteractRequest(stdin_data=data) except OSError: break @@ -969,30 +1171,77 @@ def set_reserved_names(self, names): old_tty = termios.tcgetattr(sys.stdin) try: + import 
time tty.setraw(sys.stdin.fileno()) response_iterator = self.stub.interact_node(request_generator()) - # First response is connection status + import queue + response_queue = queue.Queue() + + def response_consumer(): + try: + for r in response_iterator: + response_queue.put(r) + except Exception: + pass + response_queue.put(None) + + t_consumer = threading.Thread(target=response_consumer, daemon=True) + t_consumer.start() + + # First phase: Wait for connection status, print early data try: - first_res = next(response_iterator) - if first_res.success: - # Connection established on server, show success message - termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) - printer.success(conn_msg) - tty.setraw(sys.stdin.fileno()) - else: - # Connection failed on server - termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) - printer.error(f"Connection failed: {first_res.error_message}") - return - except StopIteration: + while True: + res = response_queue.get() + if res is None: + return + if res.stdout_data: + data = res.stdout_data + if debug: + data = data.replace(b'\x1b[H\x1b[2J', b'').replace(b'\x1bc', b'').replace(b'\x1b[3J', b'') + os.write(sys.stdout.fileno(), data) + + if res.success: + # Connection established on server, show success message + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) + printer.success(conn_msg) + pause_stdin[0] = False + tty.setraw(sys.stdin.fileno()) + break + + if res.error_message: + # Connection failed on server + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) + printer.error(f"Connection failed: {res.error_message}") + return + except queue.Empty: return - for res in response_iterator: + # Second phase: Stream active session + # Clear screen filter is only applied before success (Phase 1). + # Once the user has a prompt, Ctrl+L must work normally. 
+ while True: + res = response_queue.get() + if res is None: + break + if res.copilot_prompt: + self._handle_remote_copilot( + res, request_queue, response_queue, + client_buffer_bytes, cmd_byte_positions, + pause_generator, resume_generator, old_tty + ) + continue + + if res.copilot_injected_command: + cmd_byte_positions.append((len(client_buffer_bytes), res.copilot_injected_command)) + if res.stdout_data: os.write(sys.stdout.fileno(), res.stdout_data) + client_buffer_bytes.extend(res.stdout_data) finally: termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) + os.close(wake_r) + os.close(wake_w) @handle_errors def connect_dynamic(self, connection_params, debug=False): @@ -1000,10 +1249,23 @@ def set_reserved_names(self, names): import select import tty import termios + import queue import os import json params_json = json.dumps(connection_params) + request_queue = queue.Queue() + client_buffer_bytes = bytearray() + cmd_byte_positions = [(0, None)] + pause_stdin = [False] + wake_r, wake_w = os.pipe() + + def pause_generator(): + pause_stdin[0] = True + os.write(wake_w, b'\x00') + + def resume_generator(): + pause_stdin[0] = False def request_generator(): cols, rows = 80, 24 @@ -1019,12 +1281,31 @@ def set_reserved_names(self, names): ) while True: - r, _, _ = select.select([sys.stdin.fileno()], [], []) - if r: + try: + while True: + req = request_queue.get_nowait() + if req is None: + return + yield req + except queue.Empty: + pass + + if pause_stdin[0]: + import time + time.sleep(0.05) + continue + + r, _, _ = select.select([sys.stdin.fileno(), wake_r], [], [], 0.05) + if wake_r in r: + os.read(wake_r, 1) + continue + if sys.stdin.fileno() in r and not pause_stdin[0]: try: data = os.read(sys.stdin.fileno(), 1024) if not data: break + if b'\r' in data or b'\n' in data: + cmd_byte_positions.append((len(client_buffer_bytes), None)) yield connpy_pb2.InteractRequest(stdin_data=data) except OSError: break @@ -1043,30 +1324,75 @@ def set_reserved_names(self, names): 
old_tty = termios.tcgetattr(sys.stdin) try: + import time tty.setraw(sys.stdin.fileno()) response_iterator = self.stub.interact_node(request_generator()) - # First response is connection status + import queue + response_queue = queue.Queue() + + def response_consumer(): + try: + for r in response_iterator: + response_queue.put(r) + except Exception: + pass + response_queue.put(None) + + t_consumer = threading.Thread(target=response_consumer, daemon=True) + t_consumer.start() + + # First phase: Wait for connection status, print early data try: - first_res = next(response_iterator) - if first_res.success: - # Connection established on server, show success message - termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) - printer.success(conn_msg) - tty.setraw(sys.stdin.fileno()) - else: - # Connection failed on server - termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) - printer.error(f"Connection failed: {first_res.error_message}") - return - except StopIteration: + while True: + res = response_queue.get() + if res is None: + return + if res.stdout_data: + data = res.stdout_data + if debug: + data = data.replace(b'\x1b[H\x1b[2J', b'').replace(b'\x1bc', b'').replace(b'\x1b[3J', b'') + os.write(sys.stdout.fileno(), data) + + if res.success: + # Connection established on server, show success message + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) + printer.success(conn_msg) + pause_stdin[0] = False + tty.setraw(sys.stdin.fileno()) + break + + if res.error_message: + # Connection failed on server + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) + printer.error(f"Connection failed: {res.error_message}") + return + except queue.Empty: return - for res in response_iterator: + # Second phase: Stream active session + while True: + res = response_queue.get() + if res is None: + break + if res.copilot_prompt: + self._handle_remote_copilot( + res, request_queue, response_queue, + client_buffer_bytes, cmd_byte_positions, + pause_generator, 
resume_generator, old_tty + ) + continue + + if res.copilot_injected_command: + cmd_byte_positions.append((len(client_buffer_bytes), res.copilot_injected_command)) + if res.stdout_data: os.write(sys.stdout.fileno(), res.stdout_data) + client_buffer_bytes.extend(res.stdout_data) finally: termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) + os.close(wake_r) + os.close(wake_w) @MethodHook @handle_errors @@ -1220,10 +1546,23 @@ def connect_dynamic(self, connection_params, debug=False): import select import tty import termios + import queue import os import json params_json = json.dumps(connection_params) + request_queue = queue.Queue() + client_buffer_bytes = bytearray() + cmd_byte_positions = [(0, None)] + pause_stdin = [False] + wake_r, wake_w = os.pipe() + + def pause_generator(): + pause_stdin[0] = True + os.write(wake_w, b'\x00') + + def resume_generator(): + pause_stdin[0] = False def request_generator(): cols, rows = 80, 24 @@ -1239,12 +1578,31 @@ def connect_dynamic(self, connection_params, debug=False): ) while True: - r, _, _ = select.select([sys.stdin.fileno()], [], []) - if r: + try: + while True: + req = request_queue.get_nowait() + if req is None: + return + yield req + except queue.Empty: + pass + + if pause_stdin[0]: + import time + time.sleep(0.05) + continue + + r, _, _ = select.select([sys.stdin.fileno(), wake_r], [], [], 0.05) + if wake_r in r: + os.read(wake_r, 1) + continue + if sys.stdin.fileno() in r and not pause_stdin[0]: try: data = os.read(sys.stdin.fileno(), 1024) if not data: break + if b'\r' in data or b'\n' in data: + cmd_byte_positions.append((len(client_buffer_bytes), None)) yield connpy_pb2.InteractRequest(stdin_data=data) except OSError: break @@ -1263,30 +1621,75 @@ def connect_dynamic(self, connection_params, debug=False): old_tty = termios.tcgetattr(sys.stdin) try: + import time tty.setraw(sys.stdin.fileno()) response_iterator = self.stub.interact_node(request_generator()) - # First response is connection status + import 
queue + response_queue = queue.Queue() + + def response_consumer(): + try: + for r in response_iterator: + response_queue.put(r) + except Exception: + pass + response_queue.put(None) + + t_consumer = threading.Thread(target=response_consumer, daemon=True) + t_consumer.start() + + # First phase: Wait for connection status, print early data try: - first_res = next(response_iterator) - if first_res.success: - # Connection established on server, show success message - termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) - printer.success(conn_msg) - tty.setraw(sys.stdin.fileno()) - else: - # Connection failed on server - termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) - printer.error(f"Connection failed: {first_res.error_message}") - return - except StopIteration: + while True: + res = response_queue.get() + if res is None: + return + if res.stdout_data: + data = res.stdout_data + if debug: + data = data.replace(b'\x1b[H\x1b[2J', b'').replace(b'\x1bc', b'').replace(b'\x1b[3J', b'') + os.write(sys.stdout.fileno(), data) + + if res.success: + # Connection established on server, show success message + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) + printer.success(conn_msg) + pause_stdin[0] = False + tty.setraw(sys.stdin.fileno()) + break + + if res.error_message: + # Connection failed on server + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) + printer.error(f"Connection failed: {res.error_message}") + return + except queue.Empty: return - for res in response_iterator: + # Second phase: Stream active session + while True: + res = response_queue.get() + if res is None: + break + if res.copilot_prompt: + self._handle_remote_copilot( + res, request_queue, response_queue, + client_buffer_bytes, cmd_byte_positions, + pause_generator, resume_generator, old_tty + ) + continue + + if res.copilot_injected_command: + cmd_byte_positions.append((len(client_buffer_bytes), res.copilot_injected_command)) + if res.stdout_data: 
os.write(sys.stdout.fileno(), res.stdout_data) + client_buffer_bytes.extend(res.stdout_data) finally: - termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) + os.close(wake_r) + os.close(wake_w)
      @@ -1304,9 +1707,23 @@ def connect_node(self, unique_id, sftp=False, debug=False, logger=None): import select import tty import termios + import queue import os import threading + request_queue = queue.Queue() + client_buffer_bytes = bytearray() + cmd_byte_positions = [(0, None)] + pause_stdin = [False] + wake_r, wake_w = os.pipe() + + def pause_generator(): + pause_stdin[0] = True + os.write(wake_w, b'\x00') + + def resume_generator(): + pause_stdin[0] = False + def request_generator(): cols, rows = 80, 24 try: @@ -1320,12 +1737,31 @@ def connect_node(self, unique_id, sftp=False, debug=False, logger=None): ) while True: - r, _, _ = select.select([sys.stdin.fileno()], [], []) - if r: + try: + while True: + req = request_queue.get_nowait() + if req is None: + return + yield req + except queue.Empty: + pass + + if pause_stdin[0]: + import time + time.sleep(0.05) + continue + + r, _, _ = select.select([sys.stdin.fileno(), wake_r], [], [], 0.05) + if wake_r in r: + os.read(wake_r, 1) + continue + if sys.stdin.fileno() in r and not pause_stdin[0]: try: data = os.read(sys.stdin.fileno(), 1024) if not data: break + if b'\r' in data or b'\n' in data: + cmd_byte_positions.append((len(client_buffer_bytes), None)) yield connpy_pb2.InteractRequest(stdin_data=data) except OSError: break @@ -1343,30 +1779,77 @@ def connect_node(self, unique_id, sftp=False, debug=False, logger=None): old_tty = termios.tcgetattr(sys.stdin) try: + import time tty.setraw(sys.stdin.fileno()) response_iterator = self.stub.interact_node(request_generator()) - # First response is connection status + import queue + response_queue = queue.Queue() + + def response_consumer(): + try: + for r in response_iterator: + response_queue.put(r) + except Exception: + pass + response_queue.put(None) + + t_consumer = threading.Thread(target=response_consumer, daemon=True) + t_consumer.start() + + # First phase: Wait for connection status, print early data try: - first_res = next(response_iterator) - if 
first_res.success: - # Connection established on server, show success message - termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) - printer.success(conn_msg) - tty.setraw(sys.stdin.fileno()) - else: - # Connection failed on server - termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) - printer.error(f"Connection failed: {first_res.error_message}") - return - except StopIteration: + while True: + res = response_queue.get() + if res is None: + return + if res.stdout_data: + data = res.stdout_data + if debug: + data = data.replace(b'\x1b[H\x1b[2J', b'').replace(b'\x1bc', b'').replace(b'\x1b[3J', b'') + os.write(sys.stdout.fileno(), data) + + if res.success: + # Connection established on server, show success message + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) + printer.success(conn_msg) + pause_stdin[0] = False + tty.setraw(sys.stdin.fileno()) + break + + if res.error_message: + # Connection failed on server + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) + printer.error(f"Connection failed: {res.error_message}") + return + except queue.Empty: return - for res in response_iterator: + # Second phase: Stream active session + # Clear screen filter is only applied before success (Phase 1). + # Once the user has a prompt, Ctrl+L must work normally. + while True: + res = response_queue.get() + if res is None: + break + if res.copilot_prompt: + self._handle_remote_copilot( + res, request_queue, response_queue, + client_buffer_bytes, cmd_byte_positions, + pause_generator, resume_generator, old_tty + ) + continue + + if res.copilot_injected_command: + cmd_byte_positions.append((len(client_buffer_bytes), res.copilot_injected_command)) + if res.stdout_data: os.write(sys.stdout.fileno(), res.stdout_data) + client_buffer_bytes.extend(res.stdout_data) finally: - termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) + termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty) + os.close(wake_r) + os.close(wake_w)
      @@ -2036,6 +2519,7 @@ def stop_api(self):

      AIStub

      • ask
      • +
      • configure_mcp
      • configure_provider
      • confirm
      • delete_session
      • @@ -2130,7 +2614,7 @@ def stop_api(self): diff --git a/docs/connpy/grpc_layer/utils.html b/docs/connpy/grpc_layer/utils.html index da5286b..571fbd2 100644 --- a/docs/connpy/grpc_layer/utils.html +++ b/docs/connpy/grpc_layer/utils.html @@ -3,7 +3,7 @@ - + connpy.grpc_layer.utils API documentation @@ -138,7 +138,7 @@ el.replaceWith(d); diff --git a/docs/connpy/index.html b/docs/connpy/index.html index ae26ce2..7886fa4 100644 --- a/docs/connpy/index.html +++ b/docs/connpy/index.html @@ -3,9 +3,11 @@ - + connpy API documentation - + @@ -36,478 +38,149 @@ el.replaceWith(d);

        Package connpy

        -

        Connection manager

        -

Connpy is an SSH, SFTP, Telnet, kubectl, Docker pod, and AWS SSM connection manager and automation module for Linux, Mac, and Docker.

        -

        Features

        -
        - Manage connections using SSH, SFTP, Telnet, kubectl, Docker exec, and AWS SSM.
        -- Set contexts to manage specific nodes from specific contexts (work/home/clients/etc).
        -- You can generate profiles and reference them from nodes using @profilename so you don't
        -  need to edit multiple nodes when changing passwords or other information.
        -- Nodes can be stored on @folder or @subfolder@folder to organize your devices. They can
        -  be referenced using node@subfolder@folder or node@folder.
        -- If you have too many nodes, get a completion script using: conn config --completion.
        -  Or use fzf by installing pyfzf and running conn config --fzf true.
        -- Create in bulk, copy, move, export, and import nodes for easy management.
        -- Run automation scripts on network devices.
        -- Use AI with a multi-agent system (Engineer/Architect) to help you manage your devices.
        -  Supports any LLM provider via litellm (OpenAI, Anthropic, Google, etc.).
        -- Add plugins with your own scripts, and execute them remotely.
        -- Fully decoupled gRPC Client/Server architecture.
        -- Unified UI with syntax highlighting and theming.
        -- Much more!
        +

        +App Logo +

        +

        Connpy

        +

        + + +

        +

        Connpy is a powerful Connection Manager and Network Automation Platform for Linux, Mac, and Docker. It provides a unified interface for SSH, SFTP, Telnet, kubectl, Docker pods, and AWS SSM.

        +

        The v6 release introduces the AI Copilot, an interactive terminal assistant that understands your network context and helps you manage your infrastructure more intelligently.

        +

🤖 AI Copilot (New in v6)

        +

        The AI Copilot is deeply integrated into your terminal workflow: +- Terminal Context Awareness: The Copilot can "see" your screen output, helping you diagnose errors or analyze command results in real-time. +- Hybrid Multi-Agent System: Automatically escalates complex tasks between the Network Engineer (execution) and the Network Architect (strategy). +- MCP Integration: Dynamically load tools from external providers (6WIND, AWS, etc.) via the Model Context Protocol. +- Interactive Chat: Launch with conn ai for a collaborative troubleshooting session.

        +

        Core Features

        +
          +
        • Multi-Protocol: Native support for SSH, SFTP, Telnet, kubectl, Docker exec, and AWS SSM.
        • +
        • Context Management: Set regex-based contexts to manage specific nodes across different environments (work, home, clients).
        • +
        • Advanced Inventory:
            +
          • Organize nodes in folders (@folder) and subfolders (@subfolder@folder).
          • +
          • Use Global Profiles (@profilename) to manage shared credentials easily.
          • +
          • Bulk creation, copying, moving, and export/import of nodes.
          • +
          +
        • +
        • Modern UI: High-performance terminal experience with prompt-toolkit, including:
            +
          • Fuzzy search integration with fzf.
          • +
          • Advanced tab completion.
          • +
          • Syntax highlighting and customizable themes.
          • +
          +
        • +
        • Automation Engine: Run parallel tasks and playbooks on multiple devices with variable support.
        • +
        • Plugin System: Build and execute custom Python scripts locally or on a remote gRPC server.
        • +
        • gRPC Architecture: Fully decoupled Client/Server model for distributed management.
        • +
        • Privacy & Sync: Local-first encrypted storage (RSA/OAEP) with optional Google Drive backup.
        • +
        +

        Installation

        +
        pip install connpy
         
        -

        Usage

        -
        usage: conn [-h] [--add | --del | --mod | --show | --debug] [node|folder] [--sftp]
        -       conn {profile,move,mv,copy,cp,list,ls,bulk,export,import,ai,run,api,plugin,config,sync,context} ...
        +

        Run it in Windows/Linux using Docker

        +
        git clone https://github.com/fluzzi/connpy
        +cd connpy
        +docker compose build
         
        -positional arguments:
        -  node|folder        node[@subfolder][@folder]
        -                     Connect to specific node or show all matching nodes
        -                     [@subfolder][@folder]
        -                     Show all available connections globally or in specified path
        +# Run it like a native app (completely silent)
        +docker compose --log-level ERROR run --rm --remove-orphans connpy-app [command]
         
        -options:
        -  -h, --help         show this help message and exit
        -  -v, --version      Show version
        -  -a, --add          Add new node[@subfolder][@folder] or [@subfolder]@folder
        -  -r, --del, --rm    Delete node[@subfolder][@folder] or [@subfolder]@folder
        -  -e, --mod, --edit  Modify node[@subfolder][@folder]
        -  -s, --show         Show node[@subfolder][@folder]
        -  -d, --debug        Display all conections steps
        -  -t, --sftp         Connects using sftp instead of ssh
        -  --service-mode     Set the backend service mode (local or remote)
        -  --remote           Connect to a remote connpy service via gRPC
        -  --theme            UI Output theme (dark, light, or path)
        -
        -Commands:
        -  profile         Manage profiles
        -  move(mv)        Move node
        -  copy(cp)        Copy node
        -  list(ls)        List profiles, nodes or folders
        -  bulk            Add nodes in bulk
        -  export          Export connection folder to Yaml file
        -  import          Import connection folder to config from Yaml file
        -  ai              Make request to an AI
        -  run             Run scripts or commands on nodes
        -  api             Start and stop connpy api
        -  plugin          Manage plugins
        -  config          Manage app config
        -  sync            Sync config with Google
        -  context         Manage contexts with regex matching
        +# Pro Tip: Add this alias for a 100% native experience from any folder
        +alias conn='docker compose -f /path/to/connpy/docker-compose.yml --log-level ERROR run --rm --remove-orphans connpy-app'
         
        -

        Manage profiles

        -
        usage: conn profile [-h] (--add | --del | --mod | --show) profile
        -
        -positional arguments:
        -  profile        Name of profile to manage
        -
        -options:
        -  -h, --help         show this help message and exit
        -  -a, --add          Add new profile
        -  -r, --del, --rm    Delete profile
        -  -e, --mod, --edit  Modify profile
        -  -s, --show         Show profile
        -
        +
        +

🔒 Privacy & Integration

        +

        Privacy Policy

        +

        Connpy is committed to protecting your privacy: +- Local Storage: All server addresses, usernames, and passwords are encrypted and stored only on your machine. No data is transmitted to our servers. +- Data Access: Data is used solely for managing and automating your connections.

        +

        Google Integration

        +

        Used strictly for backup: +- Backup: Sync your encrypted configuration with your Google Drive account. +- Scoped Access: Connpy only accesses its own backup files.

        +
        +

        Usage

        +
        usage: conn [-h] [--add | --del | --mod | --show | --debug] [node|folder] [--sftp]
        +       conn {profile,move,copy,list,bulk,export,import,ai,run,api,plugin,config,sync,context} ...
         
        -

        Examples

        -
           #Add new profile
        -   conn profile --add office-user
        -   #Add new folder
        -   conn --add @office
        -   #Add new subfolder
        -   conn --add @datacenter@office
        -   #Add node to subfolder
        -   conn --add server@datacenter@office
        -   #Add node to folder
        -   conn --add pc@office
        -   #Show node information
        -   conn --show server@datacenter@office
        -   #Connect to nodes
        -   conn pc@office
        -   conn server
        -   #Create and set new context
        -   conn context -a office .*@office
        -   conn context --set office
        -   #Run a command in a node
        -   conn run server ls -la
        +

        Basic Examples:

        +
        # Add a folder and subfolder
        +conn --add @office
        +conn --add @datacenter@office
        +
        +# Add a node with a profile
        +conn --add server1@datacenter@office --profile @myuser
        +
        +# Connect to a node (fuzzy match)
        +conn server1
        +
        +# Start the AI Copilot
        +conn ai
        +
        +# Run a command on all nodes in a folder
        +conn run @office "uptime"
         
        +

        Plugin Requirements for Connpy

        Remote Plugin Execution

        When Connpy operates in remote mode, plugins are executed transparently on the server: - The client automatically downloads the plugin source code (Parser class context) to generate the local argparse structure and provide autocompletion. -- The execution phase (Entrypoint class) is redirected via gRPC streams to execute in the server's memory, ensuring the plugin runs securely against the server's inventory without passing sensitive data to the client. -- You can manage remote plugins using the --remote flag (e.g. connpy plugin --add myplugin script.py --remote).

        +- The execution phase (Entrypoint class) is redirected via gRPC streams to execute in the server's memory. +- You can manage remote plugins using the --remote flag.

        General Structure

          -
        • The plugin script must be a Python file.
        • -
        • Only the following top-level elements are allowed in the plugin script:
        • -
        • Class definitions
        • -
        • Function definitions
        • -
        • Import statements
        • -
        • The if __name__ == "__main__": block for standalone execution
        • -
        • Pass statements
        • -
        -

        Specific Class Requirements

        -
          -
        • The plugin script must define specific classes with particular attributes and methods. Each class serves a distinct role within the plugin's architecture:
        • -
        • Class Parser:
            -
          • Purpose: Handles parsing of command-line arguments.
          • -
          • Requirements:
          • -
          • Must contain only one method: __init__.
          • -
          • The __init__ method must initialize at least one attribute:
              -
            • self.parser: An instance of argparse.ArgumentParser.
            • -
            -
          • -
          -
        • -
        • Class Entrypoint:
            -
          • Purpose: Acts as the entry point for plugin execution, utilizing parsed arguments and integrating with the main application.
          • -
          • Requirements:
          • -
          • Must have an __init__ method that accepts exactly three parameters besides self:
              -
            • args: Arguments passed to the plugin.
            • -
            • The parser instance (typically self.parser from the Parser class).
            • -
            • The Connapp instance to interact with the Connpy app.
            • -
            -
          • -
          -
        • -
        • Class Preload:
            -
          • Purpose: Performs any necessary preliminary setup or configuration independent of the main parsing and entry logic.
          • -
          -
        • -
        • Requirements:
            -
          • Contains at least an __init__ method that accepts parameter connapp besides self.
          • -
          -
        • -
        -

        Class Dependencies and Combinations

        -
          -
        • Dependencies:
        • -
        • Parser and Entrypoint are interdependent and must both be present if one is included.
        • -
        • Preload is independent and may exist alone or alongside the other classes.
        • -
        • Valid Combinations:
        • -
        • Parser and Entrypoint together.
        • -
        • Preload alone.
        • -
        • All three classes (Parser, Entrypoint, Preload).
        • +
        • The plugin script must define specific classes:
        • +
        • Class Parser: Handles argparse.ArgumentParser initialization.
        • +
        • Class Entrypoint: Main execution logic (receives args, parser, and connapp).
        • +
        • Class Preload: (Optional) For modifying core app behavior or registering hooks.

        Preload Modifications and Hooks

        -

        In the Preload class of the plugin system, you have the ability to customize the behavior of existing classes and methods within the application through a robust hooking system. This documentation explains how to use the modify, register_pre_hook, and register_post_hook methods to tailor plugin functionality to your needs.

        -

        Modifying Classes with modify

        -

        The modify method allows you to alter instances of a class at the time they are created or after their creation. This is particularly useful for setting or modifying configuration settings, altering default behaviors, or adding new functionalities to existing classes without changing the original class definitions.

        -
          -
        • Usage: Modify a class to include additional configurations or changes
        • -
        • Modify Method Signature:
        • -
        • modify(modification_method): A function that is invoked with an instance of the class as its argument. This function should perform any modifications directly on this instance.
        • -
        • Modification Method Signature:
        • -
        • Arguments:
            -
          • cls: -This function accepts a single argument, the class instance, which it then modifies.
          • -
          -
        • -
        • Modifiable Classes:
            -
          • connapp.config
          • -
          • connapp.node
          • -
          • connapp.nodes
          • -
          • connapp.ai
          • -
          -
        • -
        • -

          ```python -def modify_config(cls): -# Example modification: adding a new attribute or modifying an existing one -cls.new_attribute = 'New Value'

          -

          class Preload: -def init(self, connapp): -# Applying modification to the config class instance -connapp.config.modify(modify_config) -```

          -
        • -
        -

        Implementing Method Hooks

        -

        There are 2 methods that allows you to define custom logic to be executed before (register_pre_hook) or after (register_post_hook) the main logic of a method. This is particularly useful for logging, auditing, preprocessing inputs, postprocessing outputs or adding functionalities.

        -
          -
        • Usage: Register hooks to methods to execute additional logic before or after the main method execution.
        • -
        • Registration Methods Signature:
        • -
        • register_pre_hook(pre_hook_method): A function that is invoked before the main method is executed. This function should do preprocessing of the arguments.
        • -
        • register_post_hook(post_hook_method): A function that is invoked after the main method is executed. This function should do postprocessing of the outputs.
        • -
        • Method Signatures for Pre-Hooks
        • -
        • pre_hook_method(*args, **kwargs)
        • -
        • Arguments:
            -
          • *args, **kwargs: The arguments and keyword arguments that will be passed to the method being hooked. The pre-hook function has the opportunity to inspect and modify these arguments before they are passed to the main method.
          • -
          -
        • -
        • Return:
            -
          • Must return a tuple (args, kwargs), which will be used as the new arguments for the main method. If the original arguments are not modified, the function should return them as received.
          • -
          -
        • -
        • Method Signatures for Post-Hooks:
        • -
        • post_hook_method(*args, **kwargs)
        • -
        • Arguments:
            -
          • *args, **kwargs: The arguments and keyword arguments that were passed to the main method.
          • -
          • kwargs["result"]: The value returned by the main method. This allows the post-hook to inspect and even alter the result before it is returned to the original caller.
          • -
          -
        • -
        • Return:
            -
          • Can return a modified result, which will replace the original result of the main method, or simply return kwargs["result"] to return the original method result. -
          • -
          -
        • -
        • -

          ```python -def pre_processing_hook(*args, **kwargs): -print("Pre-processing logic here") -# Modify arguments or perform any checks -return args, kwargs -# Return modified or unmodified args and kwargs

          -

          def post_processing_hook(*args, **kwargs): -print("Post-processing logic here") -# Modify the result or perform any final logging or cleanup -return kwargs["result"] -# Return the modified or unmodified result

          -

          class Preload: -def __init__(self, connapp): -# Registering a pre-hook -connapp.ai.some_method.register_pre_hook(pre_processing_hook)

          -
              # Registering a post-hook
          -    connapp.node.another_method.register_post_hook(post_processing_hook)
          -
          -

          ```

          -
        • -
        -

        Executable Block

        -
          -
        • The plugin script can include an executable block:
        • -
        • if __name__ == "__main__":
        • -
        • This block allows the plugin to be run as a standalone script for testing or independent use.
        • -
        +

        You can customize the behavior of core classes using hooks: +- modify(method): Alter class instances (e.g., connapp.config, connapp.ai). +- register_pre_hook(method): Logic to run before a method execution. +- register_post_hook(method): Logic to run after a method execution.

        Command Completion Support

        -

        Plugins can provide intelligent tab completion by defining autocompletion logic. There are two supported methods, with the tree-based approach being the most modern and recommended.

        - -

        Define a function called _connpy_tree that returns a declarative navigation tree. This method is highly efficient, supports complex state loops, and is very simple to implement for most use cases.

        -
        def _connpy_tree(info=None):
        -    nodes = info.get("nodes", [])
        -    return {
        -        "__exclude_used__": True,  # Filter out words already typed
        -        "__extra__": nodes,        # Suggest nodes at this level
        -        "--format": ["json", "yaml", "table"], # Fixed suggestions
        -        "*": {                     # Wildcard matches any positional word
        -            "interface1": None,
        -            "interface2": None,
        -            "--verbose": None
        -        }
        -    }
        -
        -
          -
        • Keys: Literal completions (exact matches).
        • -
        • * Key: A wildcard that matches any positional word typed by the user.
        • -
        • __extra__: A list or a callable (words) -> list that adds dynamic suggestions.
        • -
        • __exclude_used__: (Boolean) If True, automatically filters out words already present in the command line.
        • -
        -

        2. Legacy Function-based Completion

        -

        For backward compatibility or highly custom logic, you can define _connpy_completion.

        -
        def _connpy_completion(wordsnumber, words, info=None):
        -    if wordsnumber == 3:
        -        return ["--help", "--verbose", "start", "stop"]
        -
        -    elif wordsnumber == 4 and words[2] == "start":
        -        return info["nodes"]  # Suggest node names
        -
        -    return []
        -
        - - - - - - - - - - - - - - - - - - - - - -
        Parameter — Description
        wordsnumber — Integer indicating the total number of words on the command line. For plugins, this typically starts at 3.
        words — A list of tokens (words) already typed. words[0] is always the name of the plugin.
        info — A dictionary of structured context data (nodes, folders, profiles, config).
        -
        -

        In this example, if the user types connpy myplugin start and presses Tab, it will suggest node names.

        -
        -

        Handling Unknown Arguments

        -

        Plugins can choose to accept and process unknown arguments that are not explicitly defined in the parser. To enable this behavior, the plugin must define the following hidden argument in its Parser class:

        -
        self.parser.add_argument(
        -    "--unknown-args",
        -    action="store_true",
        -    default=True,
        -    help=argparse.SUPPRESS
        -)
        -
        -

        Behavior:

        -
          -
        • When this argument is present, Connpy will parse the known arguments and capture any extra (unknown) ones.
        • -
        • These unknown arguments will be passed to the plugin as args.unknown_args inside the Entrypoint.
        • -
        • If the user does not pass any unknown arguments, args.unknown_args will contain the default value (True, unless overridden).
        • -
        -

        Example:

        -

        If a plugin accepts unknown tcpdump flags like this:

        -
        connpy myplugin -nn -s0
        -
        -

        And defines the hidden --unknown-args flag as shown above, then:

        -
          -
        • args.unknown_args inside Entrypoint.__init__() will be: ['-nn', '-s0']
        • -
        -
        -

        This allows the plugin to receive and process arguments intended for external tools (e.g., tcpdump) without argparse raising an error.

        -
        -

        Note:

        -

        If a plugin does not define --unknown-args, any extra arguments passed will cause argparse to fail with an unrecognized arguments error.

        -

        Script Verification

        -
          -
        • The verify_script method in plugins.py is used to check the plugin script's compliance with these standards.
        • -
        • Non-compliant scripts will be rejected to ensure consistency and proper functionality within the plugin system.
        • -
        • -
        -

        Example Script

        -

        For a practical example of how to write a compatible plugin script, please refer to the following example:

        -

        Example Plugin Script

        -

        This script demonstrates the required structure and implementation details according to the plugin system's standards.

        -

        gRPC Service Architecture

        -

        Connpy features a completely decoupled gRPC Client/Server architecture. You can run Connpy as a standalone background service and connect to it remotely via the CLI or other clients.

        -

        1. Start the Server

        -

        Start the gRPC service by running:

        -
        connpy api -s 50051
        -
        -

        The server will handle all configurations, connections, AI sessions, and plugin execution locally on the machine it runs on.

        -

        2. Connect the Client

        -

        Configure your local CLI client to connect to the remote server:

        -
        connpy config --service-mode remote
        -connpy config --remote-host localhost:50051
        -
        -

        Once configured, all commands (connpy node, connpy list, connpy ai, etc.) will execute transparently on the remote server via thin-client proxies. You can revert back to standalone execution at any time by running connpy config --service-mode local.

        -

        Programmatic Access (gRPC & SOA)

        -

        Developers can build their own applications using the Connpy backend by utilizing the ServiceProvider:

        -
        from connpy.services.provider import ServiceProvider
        -services = ServiceProvider(config, mode="remote", remote_host="localhost:50051")
        -nodes = services.nodes.list_nodes()
        -
        -

        Automation module

        -

        The automation module

        -

        Standalone module

        -
        import connpy
        -router = connpy.node("uniqueName","ip/host", user="user", password="pass")
        -router.run(["term len 0","show run"])
        +

        Plugins can provide intelligent tab completion: +1. Tree-based Completion (Recommended): Define _connpy_tree(info) returning a navigation dictionary. +2. Legacy Completion: Define _connpy_completion(wordsnumber, words, info).

        +
        +

        βš™οΈ gRPC Service Architecture

        +

        Connpy can operate in a decoupled mode: +1. Start the API (Server): conn api -s 50051 +2. Configure the Client: +bash +conn config --service-mode remote +conn config --remote-host localhost:50051 +All inventory management and execution will now happen on the server.

        +
        +

        🐍 Automation Module (API)

        +

        You can use connpy as a Python library for your own scripts.

        +

        Basic Execution

        +
        import connpy
        +router = connpy.node("uniqueName", "1.1.1.1", user="admin")
        +router.run(["show ip int brief"])
         print(router.output)
        -hasip = router.test("show ip int brief","1.1.1.1")
        -if hasip:
        -    print("Router has ip 1.1.1.1")
        -else:
        -    print("router does not have ip 1.1.1.1")
         
        -

        Using manager configuration

        -
        import connpy
        -conf = connpy.configfile()
        -device = conf.getitem("server@office")
        -server = connpy.node("unique name", **device, config=conf)
        -result = server.run(["cd /", "ls -la"])
        -print(result)
        -
        -

        Running parallel tasks

        -
        import connpy
        -conf = connpy.configfile()
        -#You can get the nodes from the config from a folder and filtering in it
        -nodes = conf.getitem("@office", ["router1", "router2", "router3"])
        -#You can also get each node individually:
        -nodes = {}
        -nodes["router1"] = conf.getitem("router1@office")
        -nodes["router2"] = conf.getitem("router2@office")
        -nodes["router10"] = conf.getitem("router10@datacenter")
        -#Also, you can create the nodes manually:
        -nodes = {}
        -nodes["router1"] = {"host": "1.1.1.1", "user": "user", "password": "pass1"}
        -nodes["router2"] = {"host": "1.1.1.2", "user": "user", "password": "pass2"}
        -nodes["router3"] = {"host": "1.1.1.2", "user": "user", "password": "pass3"}
        -#Finally you run some tasks on the nodes
        -mynodes = connpy.nodes(nodes, config = conf)
        -result = mynodes.test(["show ip int br"], "1.1.1.2")
        -for i in result:
        -    print("---" + i + "---")
        -    print(result[i])
        -    print()
        -# Or for one specific node
        -mynodes.router1.run(["term len 0", "show run"], folder = "/home/user/logs")
        -
        -

        Using variables

        -
        import connpy
        +

        Parallel Tasks with Variables

        +
        import connpy
         config = connpy.configfile()
        -nodes = config.getitem("@office", ["router1", "router2", "router3"])
        -commands = []
        -commands.append("config t")
        -commands.append("interface lo {id}")
        -commands.append("ip add {ip} {mask}")
        -commands.append("end")
        -variables = {}
        -variables["router1@office"] = {"ip": "10.57.57.1"}
        -variables["router2@office"] = {"ip": "10.57.57.2"}
        -variables["router3@office"] = {"ip": "10.57.57.3"}
        -variables["__global__"] = {"id": "57"}
        -variables["__global__"]["mask"] =  "255.255.255.255"
        -expected = "!"
        -routers = connpy.nodes(nodes, config = config)
        -routers.run(commands, variables)
        -routers.test("ping {ip}", expected, variables)
        -for key in routers.result:
        -    print(key, ' ---> ', ("pass" if routers.result[key] else "fail"))
        -
        -

        Using AI

        -
        import connpy
        -conf = connpy.configfile()
        -# Uses models and API keys from config, or override them:
        -myai = connpy.ai(conf, engineer_model="gemini/gemini-2.5-flash", engineer_api_key="your-key")
        -result = myai.ask("go to router1 and show me the running configuration")
        -print(result["response"])
        -# Streaming is enabled by default for CLI, disable for programmatic use:
        -result = myai.ask("show interfaces on all routers", stream=False)
        -print(result["response"])
        -
        -

        AI Plugin Tool Registration

        -

        Plugins can register custom tools with the AI system using register_ai_tool() in their Preload class:

        -
        def _register_my_tools(ai_instance):
        -    tool_def = {
        -        "type": "function",
        -        "function": {
        -            "name": "my_custom_tool",
        -            "description": "Does something useful.",
        -            "parameters": {
        -                "type": "object",
        -                "properties": {"query": {"type": "string"}},
        -                "required": ["query"]
        -            }
        -        }
        -    }
        -    ai_instance.register_ai_tool(
        -        tool_definition=tool_def,
        -        handler=my_handler_function,
        -        target="engineer",  # or "architect" or "both"
        -        engineer_prompt="- My tool: does X.",
        -        architect_prompt="  * My tool (my_custom_tool)."
        -    )
        +nodes = config.getitem("@office", ["router1", "router2"])
        +routers = connpy.nodes(nodes, config=config)
         
        -class Preload:
        -    def __init__(self, connapp):
        -        connapp.ai.modify(_register_my_tools)
        +variables = {
        +    "router1@office": {"id": "1"},
        +    "__global__": {"mask": "255.255.255.0"}
        +}
        +routers.run(["interface lo{id}", "ip address 10.0.0.{id} {mask}"], variables)
         
        -

        Developer Notes (SOA Architecture)

        -

        As of version 2.0, Connpy has migrated to a Service-Oriented Architecture (SOA): -- connpy/cli/: Contains all CLI handlers. These are responsible for argument parsing, user interaction (via inquirer), and visual output (via printer). -- connpy/services/: Contains pure logic services (Node, Profile, Execution, etc.). -- Zero-Print Policy: Services must never use print(). All output must be returned as data structures or generators to the caller (CLI handlers). -- ServiceProvider: Access services via connapp.services. This allows transparent switching between local and remote (gRPC) backends without modifying CLI logic.

        +

        AI Programmatic Use

        +
        import connpy
        +myai = connpy.ai(connpy.configfile())
        +response = myai.ask("What is the status of the BGP neighbors in the office?")
        +
        +
        +

        For detailed developer notes and plugin hooks documentation, see the Documentation.

        Sub-modules

        @@ -520,6 +193,10 @@ class Preload:
        +
        connpy.mcp_client
        +
        +
        +
        connpy.proto
        @@ -536,6 +213,10 @@ class Preload:
        +
        connpy.utils
        +
        +
        +
  • @@ -969,7 +650,7 @@ class ai: aiconfig = self.config.config.get("ai", {}) # Modelos (Prioridad: Argumento -> Config -> Default) - self.engineer_model = engineer_model or aiconfig.get("engineer_model") or "gemini/gemini-3.1-flash-lite-preview" + self.engineer_model = engineer_model or aiconfig.get("engineer_model") or "gemini/gemini-3.1-flash-lite" self.architect_model = architect_model or aiconfig.get("architect_model") or "anthropic/claude-sonnet-4-6" # API Keys (Prioridad: Argumento -> Config) @@ -995,6 +676,9 @@ class ai: self.tool_status_formatters = {} # {"tool_name": formatter_callable} self.engineer_prompt_extensions = [] # Extra text for engineer prompt self.architect_prompt_extensions = [] # Extra text for architect prompt + + # MCP Manager + self.mcp_manager = MCPClientManager(self.config) # Long-term memory self.memory_path = os.path.join(self.config.defaultdir, "ai_memory.md") @@ -1180,7 +864,7 @@ class ai: raise KeyboardInterrupt chunk_callback(delta.content) - if not debug and not chunk_callback: + if not chunk_callback: if not is_streaming_text: # Stop spinner definitively if status: @@ -1188,9 +872,15 @@ class ai: status.stop() except Exception: pass + + # Create a stable, direct Console to bypass _ConsoleProxy recreation bugs + from rich.console import Console as RichConsole + from .printer import connpy_theme, get_original_stdout + stable_console = RichConsole(theme=connpy_theme, file=get_original_stdout()) + live_display = Live( Panel(Markdown(full_content), title=title, border_style=border, expand=False), - console=self.console, + console=stable_console, refresh_per_second=8, transient=False ) @@ -1212,7 +902,10 @@ class ai: ) except Exception: pass - live_display.stop() + try: + live_display.stop() + except Exception: + pass # Rebuild complete response from chunks try: @@ -1520,7 +1213,7 @@ class ai: self.console.print("[pass]βœ“ Trust Mode Enabled. 
All future commands in this session will execute without confirmation.[/pass]") elif user_resp_lower in ['y', 'yes']: self.console.print("[pass]βœ“ Executing...[/pass]") - elif user_resp_lower in ['n', 'no', '']: + elif user_resp_lower in ['n', 'no', '', 'cancel']: self.console.print("[fail]βœ— Execution rejected by user.[/fail]") return "Error: User rejected execution." else: @@ -1616,6 +1309,10 @@ class ai: cmd_str = cmds[0] if cmds else "" status.update(f"[ai_status]Engineer: [CMD] {cmd_str}") elif fn == "get_node_info": status.update(f"[ai_status]Engineer: [INSPECT] {args.get('node_name','')}") + elif fn.startswith("mcp_"): + server = fn.split("__")[0].replace("mcp_", "") + tool = fn.split("__")[1] if "__" in fn else fn + status.update(f"[ai_status]Engineer: [MCP:{server}] {tool}") elif fn in self.tool_status_formatters: status.update(self.tool_status_formatters[fn](args)) if debug: @@ -1624,6 +1321,8 @@ class ai: if fn == "list_nodes": obs = self.list_nodes_tool(**args) elif fn == "run_commands": obs = self.run_commands_tool(**args, status=status) elif fn == "get_node_info": obs = self.get_node_info_tool(**args) + elif fn.startswith("mcp_"): + obs = run_ai_async(self.mcp_manager.call_tool(fn, args)).result(timeout=60) elif fn in self.external_tool_handlers: obs = self.external_tool_handlers[fn](self, **args) else: obs = f"Error: Unknown tool '{fn}'." @@ -1644,14 +1343,22 @@ class ai: except Exception as e: return f"Engineer failed: {str(e)}", usage - def _get_engineer_tools(self): + def _get_engineer_tools(self, os_filter: str = None): """Define tools available to the Engineer.""" base_tools = [ - {"type": "function", "function": {"name": "list_nodes", "description": "Lists available nodes in the inventory.", "parameters": {"type": "object", "properties": {"filter_pattern": {"type": "string", "description": "Regex to filter nodes (e.g. 
'.*', 'border.*')."}}}}}, - {"type": "function", "function": {"name": "run_commands", "description": "Runs one or more commands on matched nodes. MANDATORY: You MUST call 'list_nodes' first to verify the target list.", "parameters": {"type": "object", "properties": {"nodes_filter": {"type": "string", "description": "Exact node name or verified filter pattern."}, "commands": {"type": "array", "items": {"type": "string"}, "description": "List of commands (e.g. ['show ip route', 'show int desc'])."}}, "required": ["nodes_filter", "commands"]}}}, - {"type": "function", "function": {"name": "get_node_info", "description": "Gets full metadata for a specific node.", "parameters": {"type": "object", "properties": {"node_name": {"type": "string"}}, "required": ["node_name"]}}} + {"type": "function", "function": {"name": "list_nodes", "description": "[Universal Platform] Lists available nodes in the inventory.", "parameters": {"type": "object", "properties": {"filter_pattern": {"type": "string", "description": "Regex to filter nodes (e.g. '.*', 'border.*')."}}}}}, + {"type": "function", "function": {"name": "run_commands", "description": "[Universal Platform] Runs one or more commands on matched nodes. MANDATORY: You MUST call 'list_nodes' first to verify the target list.", "parameters": {"type": "object", "properties": {"nodes_filter": {"type": "string", "description": "Exact node name or verified filter pattern."}, "commands": {"type": "array", "items": {"type": "string"}, "description": "List of commands (e.g. 
['show ip route', 'show int desc'])."}}, "required": ["nodes_filter", "commands"]}}}, + {"type": "function", "function": {"name": "get_node_info", "description": "[Universal Platform] Gets full metadata for a specific node.", "parameters": {"type": "object", "properties": {"node_name": {"type": "string"}}, "required": ["node_name"]}}} ] + # Add dynamic tools from MCP + try: + mcp_tools = run_ai_async(self.mcp_manager.get_tools_for_llm(os_filter=os_filter)).result(timeout=10) + base_tools.extend(mcp_tools) + except Exception as e: + # Silently fail for LLM tools + pass + if self.architect_key: base_tools.extend([ {"type": "function", "function": {"name": "consult_architect", "description": "Ask the Strategic Reasoning Engine for advice on complex design, architecture, or troubleshooting decisions. You remain in control and will present the response to the user. Use this for: configuration planning, design validation, complex troubleshooting.", "parameters": {"type": "object", "properties": {"question": {"type": "string", "description": "Strategic question or decision needed."}, "technical_summary": {"type": "string", "description": "Technical findings and context gathered so far."}}, "required": ["question", "technical_summary"]}}}, @@ -1898,7 +1605,7 @@ class ai: streamed_response = False try: safe_messages = self._sanitize_messages(messages) - if stream and chunk_callback: + if stream: response, streamed_response = self._stream_completion( model=model, messages=safe_messages, tools=tools, api_key=key, status=status, label=label, debug=debug, num_retries=3, @@ -1937,8 +1644,8 @@ class ai: if msg_dict.get("tool_calls") and msg_dict.get("content") == "": msg_dict["content"] = None messages.append(msg_dict) - if debug and resp_msg.content: - # In CLI debug mode, only print intermediate reasoning if there are tool calls. 
+ if debug and resp_msg.content and not streamed_response: + # In CLI debug mode, only print intermediate reasoning if there are tool calls AND it wasn't already streamed. # If there are no tool calls, this content is the final answer and will be printed by the caller. if resp_msg.tool_calls: if status: @@ -2045,6 +1752,8 @@ class ai: elif fn == "run_commands": obs = self.run_commands_tool(**args, status=status) elif fn == "get_node_info": obs = self.get_node_info_tool(**args) elif fn == "manage_memory_tool": obs = self.manage_memory_tool(**args) + elif fn.startswith("mcp_"): + obs = run_ai_async(self.mcp_manager.call_tool(fn, args)).result(timeout=60) elif fn in self.external_tool_handlers: obs = self.external_tool_handlers[fn](self, **args) else: obs = f"Error: {fn} unknown." @@ -2111,6 +1820,275 @@ class ai: "streamed": streamed_response } + @MethodHook + async def aask_copilot(self, terminal_buffer, user_question, node_info=None, chunk_callback=None): + import json + import re + from litellm import acompletion + import asyncio + import warnings + import aiohttp + + # Suppress unawaited coroutine warnings from LiteLLM's internal streaming logic during sudden cancellation + warnings.filterwarnings("ignore", message="coroutine '.*async_streaming.*' was never awaited", category=RuntimeWarning) + + node_info = node_info or {} + os_info = node_info.get("os", "unknown") + node_name = node_info.get("name", "unknown") + persona = node_info.get("persona", "engineer") + memories = node_info.get("memories", []) + + vendor_reference = "" + if os_info and os_info != "unknown": + try: + os_filename = os_info.lower().replace(" ", "_") + ref_path = os.path.join(self.config.defaultdir, "ai_references", f"{os_filename}.md") + if os.path.exists(ref_path): + with open(ref_path, "r") as f: + vendor_reference = f.read().strip() + except Exception: + pass + + if persona == "architect": + system_prompt = f"""Role: NETWORK ARCHITECT. 
You act as a senior strategic advisor during a live SSH session. +Rules: +1. Answer the user's question directly based on the Terminal Context. +2. Focus on the "why" and "how". Analyze topologies, design patterns, and validate configurations. +3. Do NOT provide commands to execute unless specifically requested. Instead, explain the consequences and best practices. +4. Keep your guide concise and authoritative. +5. You MUST output your response in the following strict format: +<guide> +Your brief tactical guide in markdown. +</guide> +<commands> +</commands> +<risk> +low +</risk> +6. Risk level is usually "low" for read-only/no commands. + +Terminal Context: +{terminal_buffer} + +Device OS: {os_info} +Node: {node_name}""" + else: + system_prompt = f"""Role: TERMINAL COPILOT. You assist a network engineer during a live SSH session. +Rules: +1. Answer the user's question directly based on the Terminal Context. +2. If the user asks you to analyze, parse, or extract data from the Terminal Context, DO IT directly in the <guide> section (you can use markdown tables or lists). Do NOT just give them a command to do it themselves. +3. If the user wants to execute an action, provide the required CLI commands inside a <commands> block, one command per line. If no commands are needed, leave it empty or omit the block. +4. ULTRA-CONCISE. Keep your guide to the point. +5. You MUST output your response in the following strict format: +<guide> +Your brief tactical guide in markdown. 3-4 sentences max. +</guide> +<commands> +command 1 +command 2 +</commands> +<risk> +low, high, or destructive +</risk> +6. Risk level: "low" for read-only/no commands, "high" for config changes, "destructive" for potentially dangerous ops. 
+ +Terminal Context: +{terminal_buffer} + +Device OS: {os_info} +Node: {node_name}""" + + if vendor_reference: + system_prompt += f"\n\nVendor Command Reference:\n{vendor_reference}" + + if memories: + system_prompt += "\n\nSession Memory (Important Facts):\n" + for m in memories: + system_prompt += f"- {m}\n" + + # Fetch MCP tools for the current OS + mcp_tools = [] + try: + mcp_tools = await self.mcp_manager.get_tools_for_llm(os_filter=os_info) + except Exception: + pass + + if mcp_tools: + system_prompt += f"\n\nAvailable MCP Tools: {', '.join([t['function']['name'] for t in mcp_tools])}" + system_prompt += "\nUse these tools to validate syntax or find exact commands if needed before providing the final guide." + + messages = [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": user_question} + ] + + iteration = 0 + max_iterations = 5 # Allow up to 5 iterations for tool usage + + # Use models based on persona + current_model = self.architect_model if persona == "architect" else self.engineer_model + current_key = self.architect_key if persona == "architect" else self.engineer_key + + try: + while iteration < max_iterations: + iteration += 1 + response = await acompletion( + model=current_model, + messages=messages, + tools=mcp_tools if mcp_tools else None, + api_key=current_key, + stream=True + ) + + full_content = "" + streamed_guide = "" + tool_calls = [] + + async for chunk in response: + delta = chunk.choices[0].delta + + # Accumulate tool calls + if hasattr(delta, 'tool_calls') and delta.tool_calls: + for tc in delta.tool_calls: + idx = tc.index + if idx >= len(tool_calls): + tool_calls.append({"id": tc.id, "type": "function", "function": {"name": tc.function.name or "", "arguments": tc.function.arguments or ""}}) + else: + if tc.id: tool_calls[idx]["id"] = tc.id + if tc.function.name: tool_calls[idx]["function"]["name"] = tc.function.name + if tc.function.arguments: tool_calls[idx]["function"]["arguments"] += 
tc.function.arguments + + if hasattr(delta, 'content') and delta.content: + full_content += delta.content + + if chunk_callback and not tool_calls: # Only stream if not using tools + start_idx = full_content.find("<guide>") + if start_idx != -1: + after_start = full_content[start_idx + 7:] + end_idx = after_start.find("</guide>") + + if end_idx != -1: + current_guide = after_start[:end_idx] + else: + current_guide = after_start + if current_guide.endswith("<"): current_guide = current_guide[:-1] + elif current_guide.endswith("</"): current_guide = current_guide[:-2] + elif current_guide.endswith("</g"): current_guide = current_guide[:-3] + elif current_guide.endswith("</gu"): current_guide = current_guide[:-4] + elif current_guide.endswith("</gui"): current_guide = current_guide[:-5] + elif current_guide.endswith("</guid"): current_guide = current_guide[:-6] + elif current_guide.endswith("</guide"): current_guide = current_guide[:-7] + + new_text = current_guide[len(streamed_guide):] + if new_text: + chunk_callback(new_text) + streamed_guide += new_text + + if not tool_calls: + break + + # Execute tool calls + messages.append({"role": "assistant", "content": full_content or None, "tool_calls": tool_calls}) + for tc in tool_calls: + fn = tc["function"]["name"] + args = json.loads(tc["function"]["arguments"]) + + if "mcp_" in fn: + try: + obs = await asyncio.wait_for(self.mcp_manager.call_tool(fn, args), timeout=30.0) + except Exception as e: + obs = f"Error calling MCP tool: {e}" + else: + obs = f"Error: Tool {fn} not allowed in Copilot." + + messages.append({"tool_call_id": tc["id"], "role": "tool", "name": fn, "content": self._truncate(str(obs))}) + + # If we hit the limit and it was still using tools, force a final answer + if tool_calls and iteration >= max_iterations: + messages.append({"role": "user", "content": "Tool limit reached. 
Provide your final tactical guide now based on the findings."}) + response = await acompletion( + model=self.engineer_model, + messages=messages, + tools=None, + api_key=self.engineer_key, + stream=True + ) + + full_content = "" + streamed_guide = "" + async for chunk in response: + delta = chunk.choices[0].delta + if hasattr(delta, 'content') and delta.content: + full_content += delta.content + if chunk_callback: + start_idx = full_content.find("<guide>") + if start_idx != -1: + after_start = full_content[start_idx + 7:] + end_idx = after_start.find("</guide>") + if end_idx != -1: + current_guide = after_start[:end_idx] + else: + current_guide = after_start + if current_guide.endswith("<"): current_guide = current_guide[:-1] + elif current_guide.endswith("</"): current_guide = current_guide[:-2] + elif current_guide.endswith("</g"): current_guide = current_guide[:-3] + elif current_guide.endswith("</gu"): current_guide = current_guide[:-4] + elif current_guide.endswith("</gui"): current_guide = current_guide[:-5] + elif current_guide.endswith("</guid"): current_guide = current_guide[:-6] + elif current_guide.endswith("</guide"): current_guide = current_guide[:-7] + new_text = current_guide[len(streamed_guide):] + if new_text: + chunk_callback(new_text) + streamed_guide += new_text + + guide = "" + commands = [] + risk_level = "low" + + guide_match = re.search(r"<guide>(.*?)</guide>", full_content, re.DOTALL) + if guide_match: + guide = guide_match.group(1).strip() + + cmd_match = re.search(r"<commands>(.*?)</commands>", full_content, re.DOTALL) + if cmd_match: + cmds_raw = cmd_match.group(1).strip() + if cmds_raw: + commands = [c.strip() for c in cmds_raw.split('\n') if c.strip()] + + risk_match = re.search(r"<risk>(.*?)</risk>", full_content, re.DOTALL) + if risk_match: + risk_level = risk_match.group(1).strip().lower() + + if not guide and full_content and not ("<guide>" in full_content): + guide = full_content.strip() + + return { + "commands": commands, + 
"guide": guide, + "risk_level": risk_level, + "error": None + } + + except asyncio.CancelledError: + # Client cancelled the request via gRPC or local interrupt + if 'response' in locals(): + try: + if hasattr(response, 'aclose'): + # Fire and forget the close to avoid blocking the cancel + asyncio.create_task(response.aclose()) + elif hasattr(response, 'close'): + response.close() + except Exception: + pass + return None + except Exception as e: + return { + "commands": [], + "guide": "", + "risk_level": "low", + "error": str(e) + } + @MethodHook def confirm(self, user_input): return True @@ -2119,7 +2097,7 @@ class ai:
    var SAFE_COMMANDS
    -

    The type of the None singleton.

    +

    Instance variables

    @@ -2159,6 +2137,285 @@ def engineer_system_prompt(self):

    Methods

    +
    +async def aask_copilot(self, terminal_buffer, user_question, node_info=None, chunk_callback=None) +
    +
    +
    + +Expand source code + +
        @MethodHook
    +    async def aask_copilot(self, terminal_buffer, user_question, node_info=None, chunk_callback=None):
    +        import json
    +        import re
    +        from litellm import acompletion
    +        import asyncio
    +        import warnings
    +        import aiohttp
    +        
    +        # Suppress unawaited coroutine warnings from LiteLLM's internal streaming logic during sudden cancellation
    +        warnings.filterwarnings("ignore", message="coroutine '.*async_streaming.*' was never awaited", category=RuntimeWarning)
    +        
    +        node_info = node_info or {}
    +        os_info = node_info.get("os", "unknown")
    +        node_name = node_info.get("name", "unknown")
    +        persona = node_info.get("persona", "engineer")
    +        memories = node_info.get("memories", [])
    +        
    +        vendor_reference = ""
    +        if os_info and os_info != "unknown":
    +            try:
    +                os_filename = os_info.lower().replace(" ", "_")
    +                ref_path = os.path.join(self.config.defaultdir, "ai_references", f"{os_filename}.md")
    +                if os.path.exists(ref_path):
    +                    with open(ref_path, "r") as f:
    +                        vendor_reference = f.read().strip()
    +            except Exception:
    +                pass
    +        
    +        if persona == "architect":
    +            system_prompt = f"""Role: NETWORK ARCHITECT. You act as a senior strategic advisor during a live SSH session.
    +Rules:
    +1. Answer the user's question directly based on the Terminal Context.
    +2. Focus on the "why" and "how". Analyze topologies, design patterns, and validate configurations.
    +3. Do NOT provide commands to execute unless specifically requested. Instead, explain the consequences and best practices.
    +4. Keep your guide concise and authoritative.
    +5. You MUST output your response in the following strict format:
    +<guide>
    +Your brief tactical guide in markdown.
    +</guide>
    +<commands>
    +</commands>
    +<risk>
    +low
    +</risk>
    +6. Risk level is usually "low" for read-only/no commands.
    +
    +Terminal Context:
    +{terminal_buffer}
    +
    +Device OS: {os_info}
    +Node: {node_name}"""
    +        else:
    +            system_prompt = f"""Role: TERMINAL COPILOT. You assist a network engineer during a live SSH session.
    +Rules:
    +1. Answer the user's question directly based on the Terminal Context.
    +2. If the user asks you to analyze, parse, or extract data from the Terminal Context, DO IT directly in the <guide> section (you can use markdown tables or lists). Do NOT just give them a command to do it themselves.
    +3. If the user wants to execute an action, provide the required CLI commands inside a <commands> block, one command per line. If no commands are needed, leave it empty or omit the block.
    +4. ULTRA-CONCISE. Keep your guide to the point.
    +5. You MUST output your response in the following strict format:
    +<guide>
    +Your brief tactical guide in markdown. 3-4 sentences max.
    +</guide>
    +<commands>
    +command 1
    +command 2
    +</commands>
    +<risk>
    +low, high, or destructive
    +</risk>
    +6. Risk level: "low" for read-only/no commands, "high" for config changes, "destructive" for potentially dangerous ops.
    +
    +Terminal Context:
    +{terminal_buffer}
    +
    +Device OS: {os_info}
    +Node: {node_name}"""
    +        
    +        if vendor_reference:
    +            system_prompt += f"\n\nVendor Command Reference:\n{vendor_reference}"
    +
    +        if memories:
    +            system_prompt += "\n\nSession Memory (Important Facts):\n"
    +            for m in memories:
    +                system_prompt += f"- {m}\n"
    +
    +        # Fetch MCP tools for the current OS
    +        mcp_tools = []
    +        try:
    +            mcp_tools = await self.mcp_manager.get_tools_for_llm(os_filter=os_info)
    +        except Exception:
    +            pass
    +            
    +        if mcp_tools:
    +            system_prompt += f"\n\nAvailable MCP Tools: {', '.join([t['function']['name'] for t in mcp_tools])}"
    +            system_prompt += "\nUse these tools to validate syntax or find exact commands if needed before providing the final guide."
    +
    +        messages = [
    +            {"role": "system", "content": system_prompt},
    +            {"role": "user", "content": user_question}
    +        ]
    +
    +        iteration = 0
    +        max_iterations = 5 # Allow up to 5 iterations for tool usage
    +        
    +        # Use models based on persona
    +        current_model = self.architect_model if persona == "architect" else self.engineer_model
    +        current_key = self.architect_key if persona == "architect" else self.engineer_key
    +
    +        try:
    +            while iteration < max_iterations:
    +                iteration += 1
    +                response = await acompletion(
    +                    model=current_model,
    +                    messages=messages,
    +                    tools=mcp_tools if mcp_tools else None,
    +                    api_key=current_key,
    +                    stream=True
    +                )
    +                
    +                full_content = ""
    +                streamed_guide = ""
    +                tool_calls = []
    +                
    +                async for chunk in response:
    +                    delta = chunk.choices[0].delta
    +                    
    +                    # Accumulate tool calls
    +                    if hasattr(delta, 'tool_calls') and delta.tool_calls:
    +                        for tc in delta.tool_calls:
    +                            idx = tc.index
    +                            if idx >= len(tool_calls):
    +                                tool_calls.append({"id": tc.id, "type": "function", "function": {"name": tc.function.name or "", "arguments": tc.function.arguments or ""}})
    +                            else:
    +                                if tc.id: tool_calls[idx]["id"] = tc.id
    +                                if tc.function.name: tool_calls[idx]["function"]["name"] = tc.function.name
    +                                if tc.function.arguments: tool_calls[idx]["function"]["arguments"] += tc.function.arguments
    +
    +                    if hasattr(delta, 'content') and delta.content:
    +                        full_content += delta.content
    +                        
    +                        if chunk_callback and not tool_calls: # Only stream if not using tools
    +                            start_idx = full_content.find("<guide>")
    +                            if start_idx != -1:
    +                                after_start = full_content[start_idx + 7:]
    +                                end_idx = after_start.find("</guide>")
    +                                
    +                                if end_idx != -1:
    +                                    current_guide = after_start[:end_idx]
    +                                else:
    +                                    current_guide = after_start
    +                                    if current_guide.endswith("<"): current_guide = current_guide[:-1]
    +                                    elif current_guide.endswith("</"): current_guide = current_guide[:-2]
    +                                    elif current_guide.endswith("</g"): current_guide = current_guide[:-3]
    +                                    elif current_guide.endswith("</gu"): current_guide = current_guide[:-4]
    +                                    elif current_guide.endswith("</gui"): current_guide = current_guide[:-5]
    +                                    elif current_guide.endswith("</guid"): current_guide = current_guide[:-6]
    +                                    elif current_guide.endswith("</guide"): current_guide = current_guide[:-7]
    +                                
    +                                new_text = current_guide[len(streamed_guide):]
    +                                if new_text:
    +                                    chunk_callback(new_text)
    +                                    streamed_guide += new_text
    +
    +                if not tool_calls:
    +                    break
    +                    
    +                # Execute tool calls
    +                messages.append({"role": "assistant", "content": full_content or None, "tool_calls": tool_calls})
    +                for tc in tool_calls:
    +                    fn = tc["function"]["name"]
    +                    args = json.loads(tc["function"]["arguments"])
    +                    
    +                    if "mcp_" in fn:
    +                        try:
    +                            obs = await asyncio.wait_for(self.mcp_manager.call_tool(fn, args), timeout=30.0)
    +                        except Exception as e:
    +                            obs = f"Error calling MCP tool: {e}"
    +                    else:
    +                        obs = f"Error: Tool {fn} not allowed in Copilot."
    +                        
    +                    messages.append({"tool_call_id": tc["id"], "role": "tool", "name": fn, "content": self._truncate(str(obs))})
    +
    +            # If we hit the limit and it was still using tools, force a final answer
    +            if tool_calls and iteration >= max_iterations:
    +                messages.append({"role": "user", "content": "Tool limit reached. Provide your final tactical guide now based on the findings."})
    +                response = await acompletion(
    +                    model=self.engineer_model,
    +                    messages=messages,
    +                    tools=None,
    +                    api_key=self.engineer_key,
    +                    stream=True
    +                )
    +                
    +                full_content = ""
    +                streamed_guide = ""
    +                async for chunk in response:
    +                    delta = chunk.choices[0].delta
    +                    if hasattr(delta, 'content') and delta.content:
    +                        full_content += delta.content
    +                        if chunk_callback:
    +                            start_idx = full_content.find("<guide>")
    +                            if start_idx != -1:
    +                                after_start = full_content[start_idx + 7:]
    +                                end_idx = after_start.find("</guide>")
    +                                if end_idx != -1:
    +                                    current_guide = after_start[:end_idx]
    +                                else:
    +                                    current_guide = after_start
    +                                    if current_guide.endswith("<"): current_guide = current_guide[:-1]
    +                                    elif current_guide.endswith("</"): current_guide = current_guide[:-2]
    +                                    elif current_guide.endswith("</g"): current_guide = current_guide[:-3]
    +                                    elif current_guide.endswith("</gu"): current_guide = current_guide[:-4]
    +                                    elif current_guide.endswith("</gui"): current_guide = current_guide[:-5]
    +                                    elif current_guide.endswith("</guid"): current_guide = current_guide[:-6]
    +                                    elif current_guide.endswith("</guide"): current_guide = current_guide[:-7]
    +                                new_text = current_guide[len(streamed_guide):]
    +                                if new_text:
    +                                    chunk_callback(new_text)
    +                                    streamed_guide += new_text
    +
    +            guide = ""
    +            commands = []
    +            risk_level = "low"
    +            
    +            guide_match = re.search(r"<guide>(.*?)</guide>", full_content, re.DOTALL)
    +            if guide_match:
    +                guide = guide_match.group(1).strip()
    +                
    +            cmd_match = re.search(r"<commands>(.*?)</commands>", full_content, re.DOTALL)
    +            if cmd_match:
    +                cmds_raw = cmd_match.group(1).strip()
    +                if cmds_raw:
    +                    commands = [c.strip() for c in cmds_raw.split('\n') if c.strip()]
    +                    
    +            risk_match = re.search(r"<risk>(.*?)</risk>", full_content, re.DOTALL)
    +            if risk_match:
    +                risk_level = risk_match.group(1).strip().lower()
    +
    +            if not guide and full_content and not ("<guide>" in full_content):
    +                guide = full_content.strip()
    +
    +            return {
    +                "commands": commands,
    +                "guide": guide,
    +                "risk_level": risk_level,
    +                "error": None
    +            }
    +            
    +        except asyncio.CancelledError:
    +            # Client cancelled the request via gRPC or local interrupt
    +            if 'response' in locals():
    +                try:
    +                    if hasattr(response, 'aclose'):
    +                        # Fire and forget the close to avoid blocking the cancel
    +                        asyncio.create_task(response.aclose())
    +                    elif hasattr(response, 'close'):
    +                        response.close()
    +                except Exception:
    +                    pass
    +            return None
    +        except Exception as e:
    +            return {
    +                "commands": [],
    +                "guide": "",
    +                "risk_level": "low",
    +                "error": str(e)
    +            }
    +
    +
    +
    def ask(self,
    user_input,
    dryrun=False,
    chat_history=None,
    status=None,
    debug=False,
    stream=True,
    session_id=None,
    chunk_callback=None)
    @@ -2271,7 +2528,7 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa streamed_response = False try: safe_messages = self._sanitize_messages(messages) - if stream and chunk_callback: + if stream: response, streamed_response = self._stream_completion( model=model, messages=safe_messages, tools=tools, api_key=key, status=status, label=label, debug=debug, num_retries=3, @@ -2310,8 +2567,8 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa if msg_dict.get("tool_calls") and msg_dict.get("content") == "": msg_dict["content"] = None messages.append(msg_dict) - if debug and resp_msg.content: - # In CLI debug mode, only print intermediate reasoning if there are tool calls. + if debug and resp_msg.content and not streamed_response: + # In CLI debug mode, only print intermediate reasoning if there are tool calls AND it wasn't already streamed. # If there are no tool calls, this content is the final answer and will be printed by the caller. if resp_msg.tool_calls: if status: @@ -2418,6 +2675,8 @@ def ask(self, user_input, dryrun=False, chat_history=None, status=None, debug=Fa elif fn == "run_commands": obs = self.run_commands_tool(**args, status=status) elif fn == "get_node_info": obs = self.get_node_info_tool(**args) elif fn == "manage_memory_tool": obs = self.manage_memory_tool(**args) + elif fn.startswith("mcp_"): + obs = run_ai_async(self.mcp_manager.call_tool(fn, args)).result(timeout=60) elif fn in self.external_tool_handlers: obs = self.external_tool_handlers[fn](self, **args) else: obs = f"Error: {fn} unknown." @@ -2775,7 +3034,7 @@ def confirm(self, user_input): return True
    self.console.print("[pass]βœ“ Trust Mode Enabled. All future commands in this session will execute without confirmation.[/pass]") elif user_resp_lower in ['y', 'yes']: self.console.print("[pass]βœ“ Executing...[/pass]") - elif user_resp_lower in ['n', 'no', '']: + elif user_resp_lower in ['n', 'no', '', 'cancel']: self.console.print("[fail]βœ— Execution rejected by user.[/fail]") return "Error: User rejected execution." else: @@ -3746,6 +4005,7 @@ class node: - jumphost (str): Reference another node to be used as a jumphost ''' + self.config = config if config == '': self.idletime = 0 self.key = None @@ -3903,61 +4163,29 @@ class node: @MethodHook def _logclean(self, logfile, var = False): - # Remove special ascii characters and process terminal cursor movements to clean logs. + """Remove special ascii characters and process terminal cursor movements to clean logs.""" + from .utils import log_cleaner + if var == False: - t = open(logfile, "r").read() + try: + with open(logfile, "r") as f: + t = f.read() + except: + return else: t = logfile - lines = t.split('\n') - cleaned_lines = [] - - # Regex to capture: ANSI sequences, control characters (\r, \b, etc), and plain text chunks - token_re = re.compile(r'(\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/ ]*[@-~])|\r|\b|\x7f|[\x00-\x1F]|[^\x1B\r\b\x7f\x00-\x1F]+)') - - for line in lines: - buffer = [] - cursor = 0 - - for token in token_re.findall(line): - if token == '\r': - cursor = 0 - elif token in ('\b', '\x7f'): - if cursor > 0: - cursor -= 1 - elif token == '\x1B[D': # Left Arrow - if cursor > 0: - cursor -= 1 - elif token == '\x1B[C': # Right Arrow - if cursor < len(buffer): - cursor += 1 - elif token == '\x1B[K': # Clear to end of line - buffer = buffer[:cursor] - elif token.startswith('\x1B'): - # Ignore other ANSI sequences (colors, etc) - continue - elif len(token) == 1 and ord(token) < 32: - # Ignore other non-printable control chars - continue - else: - # Regular printable text - for char in token: - if cursor == 
len(buffer): - buffer.append(char) - else: - buffer[cursor] = char - cursor += 1 - cleaned_lines.append("".join(buffer)) - - t = "\n".join(cleaned_lines).replace('\n\n', '\n').strip() + result = log_cleaner(t) if var == False: - d = open(logfile, "w") - d.write(t) - d.close() + try: + with open(logfile, "w") as f: + f.write(result) + except: + pass return else: - return t + return result @MethodHook def _savelog(self): @@ -4000,14 +4228,17 @@ class node: port_str = f":{self.port}" if self.port and self.protocol not in ["ssm", "kubectl", "docker"] else "" logger("success", f"Connected to {self.unique} at {self.host}{port_str} via: {self.protocol}") + # Always initialize self.mylog to capture terminal context for the AI Copilot + if not hasattr(self, 'mylog'): + self.mylog = io.BytesIO() + + if not async_mode: + self.child.logfile_read = self.mylog + + # Only start disk-logging tasks if logfile is configured if 'logfile' in dir(self): - # Initialize self.mylog - if not 'mylog' in dir(self): - self.mylog = io.BytesIO() if not async_mode: - self.child.logfile_read = self.mylog - - # Start the _savelog thread + # Start the _savelog thread (sync mode) log_thread = threading.Thread(target=self._savelog) log_thread.daemon = True log_thread.start() @@ -4019,14 +4250,15 @@ class node: x.start() if debug: if 'mylog' in dir(self): - print(self.mylog.getvalue().decode()) + if not async_mode: + print(self.mylog.getvalue().decode()) def _teardown_interact_environment(self): if 'logfile' in dir(self) and hasattr(self, 'mylog'): with open(self.logfile, "w") as f: f.write(self._logclean(self.mylog.getvalue().decode(), True)) - async def _async_interact_loop(self, local_stream, resize_callback): + async def _async_interact_loop(self, local_stream, resize_callback, copilot_handler=None): local_stream.setup(resize_callback=resize_callback) try: child_fd = self.child.child_fd @@ -4061,9 +4293,14 @@ class node: loop = asyncio.get_running_loop() child_reader_queue = asyncio.Queue() + # 
Track command byte positions for copilot context navigation + # Each entry is (byte_position, command_text_or_None) + cmd_byte_positions = [(0, None)] + def _child_read_ready(): try: - data = os.read(child_fd, 4096) + # Increase buffer to 64KB for better high-speed handling + data = os.read(child_fd, 65536) if data: child_reader_queue.put_nowait(data) else: @@ -4081,11 +4318,39 @@ class node: data = await local_stream.read() if not data: break - try: - os.write(child_fd, data) - except OSError: - break - self.lastinput = time() + + # Copilot interception + if copilot_handler and b'\x00' in data: + # Build node info from available metadata and ensure values are strings (not bytes) + def to_str(val): + if isinstance(val, bytes): + return val.decode(errors='replace') + return str(val) if val is not None else "unknown" + + node_info = { + "name": to_str(getattr(self, 'unique', 'unknown')), + "host": to_str(getattr(self, 'host', 'unknown')) + } + if isinstance(getattr(self, 'tags', None), dict): + node_info["os"] = to_str(self.tags.get("os", "unknown")) + node_info["prompt"] = to_str(self.tags.get("prompt", r'>$|#$|\$$|>.$|#.$|\$.$')) + + # Invoke copilot (async callback handles UI) + await copilot_handler(self.mylog.getvalue(), node_info, local_stream, child_fd, cmd_byte_positions) + continue + + # Remove any stray \x00 bytes and forward normally + clean_data = data.replace(b'\x00', b'') + if clean_data: + # Track command boundaries when user hits Enter + if hasattr(self, 'mylog') and (b'\r' in clean_data or b'\n' in clean_data): + cmd_byte_positions.append((self.mylog.tell(), None)) + + try: + os.write(child_fd, clean_data) + except OSError: + break + self.lastinput = time() async def egress_task(): # Continue stripping newlines from the live stream until we hit real text @@ -4094,18 +4359,41 @@ class node: data = await child_reader_queue.get() if not data: break - - if skip_newlines: - stripped = data.lstrip(b'\r\n') - if stripped: - skip_newlines = False - data = 
stripped - else: - continue - - await local_stream.write(data) - if hasattr(self, 'mylog'): - self.mylog.write(data) + + # Batching Optimization: Drain the queue to batch writes during high-volume bursts + # Helps the terminal parse ANSI faster and reduces syscalls. + chunks = [data] + while not child_reader_queue.empty(): + try: + extra = child_reader_queue.get_nowait() + if not extra: + chunks.append(b'') # Re-put EOF later or handle it + break + chunks.append(extra) + except asyncio.QueueEmpty: + break + + has_eof = chunks[-1] == b'' + if has_eof: + chunks.pop() + + if chunks: + combined_data = b''.join(chunks) + if skip_newlines: + stripped = combined_data.lstrip(b'\r\n') + if stripped: + skip_newlines = False + combined_data = stripped + else: + if has_eof: break + continue + + await local_stream.write(combined_data) + if hasattr(self, 'mylog'): + self.mylog.write(combined_data) + + if has_eof: + break async def keepalive_task(): while True: @@ -4124,16 +4412,17 @@ class node: current_size = self.mylog.tell() if current_size != prev_size: try: + # Move heavy log cleaning to a thread to avoid freezing the interaction loop + raw_log = self.mylog.getvalue().decode(errors='replace') + cleaned_log = await asyncio.to_thread(self._logclean, raw_log, True) with open(self.logfile, "w") as f: - f.write(self._logclean(self.mylog.getvalue().decode(), True)) + f.write(cleaned_log) prev_size = current_size except Exception: pass try: - # gather runs until any task completes (or we just let them run until EOF breaks them) - # Ingress breaks on user EOF. Egress breaks on child EOF. - # We want to exit if either happens, so return_exceptions=False, but we need to cancel the others. 
+ # We wait for either the user (ingress) or the child (egress) to finish tasks = [ asyncio.create_task(ingress_task()), asyncio.create_task(egress_task()) @@ -4142,9 +4431,34 @@ class node: tasks.append(asyncio.create_task(keepalive_task())) if hasattr(self, 'logfile') and hasattr(self, 'mylog'): tasks.append(asyncio.create_task(savelog_task())) - done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED) - for p in pending: - p.cancel() + + done, pending = await asyncio.wait( + [tasks[0], tasks[1]], + return_when=asyncio.FIRST_COMPLETED + ) + + # If ingress finished first (user quit), give egress a small window to catch up + # on the remaining output in the queue. + if tasks[0] in done and tasks[1] not in done: + try: + await asyncio.wait_for(tasks[1], timeout=0.2) + except (asyncio.TimeoutError, asyncio.CancelledError): + pass + + for t in tasks: + if t not in done: + t.cancel() + + # Final log sync on thread to avoid losing last lines + if hasattr(self, 'logfile') and hasattr(self, 'mylog'): + try: + raw_log = self.mylog.getvalue().decode(errors='replace') + cleaned_log = await asyncio.to_thread(self._logclean, raw_log, True) + with open(self.logfile, "w") as f: + f.write(cleaned_log) + except Exception: + pass + finally: loop.remove_reader(child_fd) try: @@ -4175,7 +4489,10 @@ class node: except Exception: pass - asyncio.run(self._async_interact_loop(local_stream, resize_callback)) + # Build local copilot handler + copilot_handler = self._build_local_copilot_handler() + + asyncio.run(self._async_interact_loop(local_stream, resize_callback, copilot_handler=copilot_handler)) finally: self._teardown_interact_environment() else: @@ -4185,6 +4502,93 @@ class node: printer.error(f"Connection failed: {str(connect)}") sys.exit(1) + def _build_local_copilot_handler(self): + """Build copilot handler for local CLI sessions using rich for rendering.""" + config = getattr(self, 'config', None) if hasattr(self, 'config') else None + return 
self._copilot_handler(config) + + def _copilot_handler(self, config): + """Unified copilot handler for local session.""" + from .cli.terminal_ui import CopilotInterface + from .services.ai_service import AIService + import asyncio + import os + + async def handler(buffer, node_info, stream, child_fd, cmd_byte_positions=None): + try: + interface = CopilotInterface( + config, + history=getattr(stream, 'copilot_history', None), + session_state=getattr(stream, 'copilot_state', None) + ) + # Save history back to stream for persistence in current session + stream.copilot_history = interface.history + stream.copilot_state = interface.session_state + + ai_service = AIService(config) + + async def on_ai_call(active_buffer, question, chunk_callback, merged_node_info): + return await ai_service.aask_copilot( + active_buffer, + question, + node_info=merged_node_info, + chunk_callback=chunk_callback + ) + # Get raw bytes from BytesIO + raw_bytes = self.mylog.getvalue() + + # Detener el lector de la terminal para que prompt_toolkit (en run_session) + # tenga control exclusivo del stdin sin interferencias de LocalStream. 
+ if hasattr(stream, 'stop_reading'): + stream.stop_reading() + elif hasattr(stream, '_loop') and hasattr(stream, 'stdin_fd'): + # Fallback si no tiene el mΓ©todo (en LocalStream) + stream._loop.remove_reader(stream.stdin_fd) + + try: + with copilot_terminal_mode(): + while True: + action, commands, custom_cmd = await interface.run_session( + raw_bytes=raw_bytes, + cmd_byte_positions=cmd_byte_positions, + node_info=node_info, + on_ai_call=on_ai_call + ) + if action == "continue": + continue + break + finally: + # Reiniciar el lector de la terminal para volver al modo interactivo SSH/Telnet + if hasattr(stream, 'start_reading'): + stream.start_reading() + elif hasattr(stream, '_loop') and hasattr(stream, 'stdin_fd'): + stream._loop.add_reader(stream.stdin_fd, stream._read_ready) + + if action in ("send_all", "custom"): + cmds_to_send = commands if action == "send_all" else custom_cmd + + if cmds_to_send: + os.write(child_fd, b'\x15') # Ctrl+U + await asyncio.sleep(0.1) + + # Prepend screen length command to avoid pagination + if "screen_length_command" in self.tags: + cmds_to_send.insert(0, self.tags["screen_length_command"]) + + for cmd in cmds_to_send: + if cmd_byte_positions is not None: + cmd_byte_positions.append((self.mylog.tell(), cmd)) + os.write(child_fd, (cmd + "\n").encode()) + await asyncio.sleep(0.8) + else: + os.write(child_fd, b'\x15\r') + except Exception as e: + import traceback + print(f"\n[ERROR in Copilot Handler] {e}", flush=True) + traceback.print_exc() + os.write(child_fd, b'\x15\r') + + return handler @MethodHook def run(self, commands, vars = None,*, folder = '', prompt = r'>$|#$|\$$|>.$|#.$|\$.$', stdout = False, timeout = 10, logger = None): @@ -4246,7 +4650,6 @@ class node: if "prompt" in self.tags: prompt = self.tags["prompt"] expects = [prompt, pexpect.EOF, pexpect.TIMEOUT] - output = '' status = '' if not isinstance(commands, list): @@ -4357,7 +4760,6 @@ class node: if "prompt" in self.tags: prompt = self.tags["prompt"] expects = 
[prompt, pexpect.EOF, pexpect.TIMEOUT] - output = '' if not isinstance(commands, list): commands = [commands] @@ -4423,15 +4825,18 @@ class node: @MethodHook def _generate_ssh_sftp_cmd(self): cmd = self.protocol - if self.idletime > 0: - cmd += " -o ServerAliveInterval=" + str(self.idletime) if self.port: if self.protocol == "ssh": cmd += " -p " + self.port elif self.protocol == "sftp": cmd += " -P " + self.port if self.options: - cmd += " " + self.options + opts = self.options + if self.protocol == "sftp": + # Strip SSH-only flags that sftp doesn't support + opts = re.sub(r'(?<!\S)-[XxtTAaNf]\b', '', opts).strip() + if opts: + cmd += " " + opts if self.jumphost: cmd += " " + self.jumphost user_host = f"{self.user}@{self.host}" if self.user else self.host @@ -4474,6 +4879,19 @@ class node: cmd += f" {self.options}" return cmd + @MethodHook + def _generate_ssm_cmd(self): + region = self.tags.get("region", "") if isinstance(self.tags, dict) else "" + profile = self.tags.get("profile", "") if isinstance(self.tags, dict) else "" + cmd = f"aws ssm start-session --target {self.host}" + if region: + cmd += f" --region {region}" + if profile: + cmd += f" --profile {profile}" + if self.options: + cmd += f" {self.options}" + return cmd + @MethodHook def _get_cmd(self): if self.protocol in ["ssh", "sftp"]: @@ -4546,6 +4964,7 @@ class node: if logger: logger("debug", f"Command:\n{cmd}") self.mylog = io.BytesIO() + self.mylog.write(f"[i] [DEBUG] Command:\r\n {cmd}\r\n".encode()) child.logfile_read = self.mylog @@ -4681,7 +5100,10 @@ def interact(self, debug=False, logger=None): except Exception: pass - asyncio.run(self._async_interact_loop(local_stream, resize_callback)) + # Build local copilot handler + copilot_handler = self._build_local_copilot_handler() + + asyncio.run(self._async_interact_loop(local_stream, resize_callback, copilot_handler=copilot_handler)) finally: self._teardown_interact_environment() else: @@ -4762,7 +5184,6 @@ def run(self, commands, vars = None,*, 
folder = '', prompt = r'>$ if "prompt" in self.tags: prompt = self.tags["prompt"] expects = [prompt, pexpect.EOF, pexpect.TIMEOUT] - output = '' status = '' if not isinstance(commands, list): @@ -4914,7 +5335,6 @@ def test(self, commands, expected, vars = None,*, folder = '', prompt = if "prompt" in self.tags: prompt = self.tags["prompt"] expects = [prompt, pexpect.EOF, pexpect.TIMEOUT] - output = '' if not isinstance(commands, list): commands = [commands] @@ -5682,57 +6102,38 @@ def test(self, commands, expected, vars = None,*, folder = None, prompt = None, diff --git a/docs/connpy/mcp_client.html b/docs/connpy/mcp_client.html new file mode 100644 index 0000000..d2d6fd2 --- /dev/null +++ b/docs/connpy/mcp_client.html @@ -0,0 +1,349 @@ + + + + + + +connpy.mcp_client API documentation + + + + + + + + + + + +
    +
    +
    +

    Module connpy.mcp_client

    +
    +
    +
    +
    +
    +
    +
    +
    +
    +
    +

    Classes

    +
    +
    +class MCPClientManager +(config=None) +
    +
    +
    + +Expand source code + +
    class MCPClientManager:
    +    """Manages MCP SSE client connections for connpy."""
    +    
    +    _instance = None
    +    _lock = threading.Lock()
    +
    +    def __new__(cls, *args, **kwargs):
    +        with cls._lock:
    +            if cls._instance is None:
    +                cls._instance = super(MCPClientManager, cls).__new__(cls)
    +                cls._instance._initialized = False
    +            return cls._instance
    +
    +    def __init__(self, config=None):
    +        if self._initialized:
    +            return
    +        self.config = config
    +        self.sessions: Dict[str, Dict[str, Any]] = {} # name -> {session, stack}
    +        self.tool_cache: Dict[str, List[Dict[str, Any]]] = {}
    +        self._connecting: Dict[str, asyncio.Future] = {}
    +        self._initialized = True
    +
    +    async def get_tools_for_llm(self, os_filter: Optional[str] = None) -> List[Dict[str, Any]]:
    +        """
    +        Fetches tools from enabled MCP servers that match the OS filter.
    +        """
    +        if not MCP_AVAILABLE:
    +            return []
    +
    +        all_llm_tools = []
    +        try:
    +            mcp_config = self.config.config.get("ai", {}).get("mcp_servers", {})
    +        except Exception:
    +            return []
    +        
    +        async def _fetch(name, cfg):
    +            if not cfg.get("enabled", True): return []
    +            
    +            # Filter by OS if specified in config (primarily used for copilot strict matching)
    +            auto_os = cfg.get("auto_load_on_os")
    +            if os_filter is not None and auto_os and os_filter.lower() != auto_os.lower():
    +                return []
    +
    +            try:
    +                session = await self._ensure_connected(name, cfg)
    +                if session:
    +                    if name in self.tool_cache: return self.tool_cache[name]
    +                    llm_tools = await self._fetch_tools_as_openai(name, session)
    +                    self.tool_cache[name] = llm_tools
    +                    return llm_tools
    +            except Exception:
    +                pass
    +            return []
    +
    +        tasks = [ _fetch(name, cfg) for name, cfg in mcp_config.items() ]
    +        
    +        if tasks:
    +            results = await asyncio.gather(*tasks)
    +            for tools in results:
    +                all_llm_tools.extend(tools)
    +                
    +        return all_llm_tools
    +
    +    async def _ensure_connected(self, name: str, cfg: Dict[str, Any]) -> Optional[Any]:
    +        if not MCP_AVAILABLE: return None
    +
    +        if name in self.sessions and self.sessions[name].get("session"):
    +            return self.sessions[name]["session"]
    +
    +        url = cfg.get("url")
    +        if not url:
    +            return None
    +
    +        if name in self._connecting:
    +            try:
    +                return await asyncio.wait_for(asyncio.shield(self._connecting[name]), timeout=10.0)
    +            except Exception:
    +                return None
    +
    +        loop = asyncio.get_running_loop()
    +        fut = loop.create_future()
    +        self._connecting[name] = fut
    +
    +        try:
    +            from contextlib import AsyncExitStack
    +            stack = AsyncExitStack()
    +            
    +            async def _do_connect():
    +                read, write = await stack.enter_async_context(sse_client(url))
    +                session = await stack.enter_async_context(ClientSession(read, write))
    +                await session.initialize()
    +                return session
    +
    +            session = await asyncio.wait_for(_do_connect(), timeout=15.0)
    +            self.sessions[name] = {"session": session, "stack": stack}
    +            fut.set_result(session)
    +            return session
    +        except Exception:
    +            fut.set_result(None)
    +            return None
    +        finally:
    +            if name in self._connecting:
    +                del self._connecting[name]
    +
    +    async def _fetch_tools_as_openai(self, server_name: str, session: Any) -> List[Dict[str, Any]]:
    +        try:
    +            result = await asyncio.wait_for(session.list_tools(), timeout=5.0)
    +            openai_tools = []
    +            for tool in result.tools:
    +                # Use mcp_ prefix to ensure valid function name for LiteLLM/Gemini
    +                prefixed_name = f"mcp_{server_name}__{tool.name}"
    +                openai_tools.append({
    +                    "type": "function",
    +                    "function": {
    +                        "name": prefixed_name,
    +                        "description": f"[{server_name}] {tool.description}",
    +                        "parameters": tool.inputSchema
    +                    }
    +                })
    +            return openai_tools
    +        except Exception:
    +            return []
    +
    +    async def call_tool(self, full_tool_name: str, arguments: Dict[str, Any]) -> Any:
    +        """Calls an MCP tool and returns text result."""
    +        if not MCP_AVAILABLE:
    +            return "Error: MCP SDK is not installed."
    +
    +        if "__" not in full_tool_name:
    +            return f"Error: Tool {full_tool_name} is not a valid MCP tool."
    +            
    +        clean_name = full_tool_name[4:] if full_tool_name.startswith("mcp_") else full_tool_name
    +        server_name, tool_name = clean_name.split("__", 1)
    +        
    +        if server_name not in self.sessions:
    +            return f"Error: MCP server {server_name} is not connected."
    +            
    +        session = self.sessions[server_name]["session"]
    +        try:
    +            result = await asyncio.wait_for(session.call_tool(tool_name, arguments), timeout=60.0)
    +            text_outputs = [content.text for content in result.content if hasattr(content, "text")]
    +            return "\n".join(text_outputs) if text_outputs else str(result)
    +        except Exception as e:
    +            return f"Error calling tool {tool_name} on {server_name}: {str(e)}"
    +
    +    async def shutdown(self):
    +        """Close all SSE connections."""
    +        for name, data in self.sessions.items():
    +            stack = data.get("stack")
    +            if stack:
    +                await stack.aclose()
    +        self.sessions = {}
    +
    +

    Manages MCP SSE client connections for connpy.

    +

    Methods

    +
    +
    +async def call_tool(self, full_tool_name:Β str, arguments:Β Dict[str,Β Any]) ‑>Β Any +
    +
    +
    + +Expand source code + +
    async def call_tool(self, full_tool_name: str, arguments: Dict[str, Any]) -> Any:
    +    """Calls an MCP tool and returns text result."""
    +    if not MCP_AVAILABLE:
    +        return "Error: MCP SDK is not installed."
    +
    +    if "__" not in full_tool_name:
    +        return f"Error: Tool {full_tool_name} is not a valid MCP tool."
    +        
    +    clean_name = full_tool_name[4:] if full_tool_name.startswith("mcp_") else full_tool_name
    +    server_name, tool_name = clean_name.split("__", 1)
    +    
    +    if server_name not in self.sessions:
    +        return f"Error: MCP server {server_name} is not connected."
    +        
    +    session = self.sessions[server_name]["session"]
    +    try:
    +        result = await asyncio.wait_for(session.call_tool(tool_name, arguments), timeout=60.0)
    +        text_outputs = [content.text for content in result.content if hasattr(content, "text")]
    +        return "\n".join(text_outputs) if text_outputs else str(result)
    +    except Exception as e:
    +        return f"Error calling tool {tool_name} on {server_name}: {str(e)}"
    +
    +

    Calls an MCP tool and returns text result.

    +
    +
    +async def get_tools_for_llm(self, os_filter:Β strΒ |Β NoneΒ =Β None) ‑>Β List[Dict[str,Β Any]] +
    +
    +
    + +Expand source code + +
    async def get_tools_for_llm(self, os_filter: Optional[str] = None) -> List[Dict[str, Any]]:
    +    """
    +    Fetches tools from enabled MCP servers that match the OS filter.
    +    """
    +    if not MCP_AVAILABLE:
    +        return []
    +
    +    all_llm_tools = []
    +    try:
    +        mcp_config = self.config.config.get("ai", {}).get("mcp_servers", {})
    +    except Exception:
    +        return []
    +    
    +    async def _fetch(name, cfg):
    +        if not cfg.get("enabled", True): return []
    +        
    +        # Filter by OS if specified in config (primarily used for copilot strict matching)
    +        auto_os = cfg.get("auto_load_on_os")
    +        if os_filter is not None and auto_os and os_filter.lower() != auto_os.lower():
    +            return []
    +
    +        try:
    +            session = await self._ensure_connected(name, cfg)
    +            if session:
    +                if name in self.tool_cache: return self.tool_cache[name]
    +                llm_tools = await self._fetch_tools_as_openai(name, session)
    +                self.tool_cache[name] = llm_tools
    +                return llm_tools
    +        except Exception:
    +            pass
    +        return []
    +
    +    tasks = [ _fetch(name, cfg) for name, cfg in mcp_config.items() ]
    +    
    +    if tasks:
    +        results = await asyncio.gather(*tasks)
    +        for tools in results:
    +            all_llm_tools.extend(tools)
    +            
    +    return all_llm_tools
    +
    +

    Fetches tools from enabled MCP servers that match the OS filter.

    +
    +
    +async def shutdown(self) +
    +
    +
    + +Expand source code + +
    async def shutdown(self):
    +    """Close all SSE connections."""
    +    for name, data in self.sessions.items():
    +        stack = data.get("stack")
    +        if stack:
    +            await stack.aclose()
    +    self.sessions = {}
    +
    +

    Close all SSE connections.

    +
    +
    +
    +
    +
    +
    + +
    + + + diff --git a/docs/connpy/proto/index.html b/docs/connpy/proto/index.html index 0fc7ddf..573e196 100644 --- a/docs/connpy/proto/index.html +++ b/docs/connpy/proto/index.html @@ -3,7 +3,7 @@ - + connpy.proto API documentation @@ -60,7 +60,7 @@ el.replaceWith(d); diff --git a/docs/connpy/services/ai_service.html b/docs/connpy/services/ai_service.html index 0689488..b3a41c0 100644 --- a/docs/connpy/services/ai_service.html +++ b/docs/connpy/services/ai_service.html @@ -3,7 +3,7 @@ - + connpy.services.ai_service API documentation @@ -58,6 +58,104 @@ el.replaceWith(d);
    class AIService(BaseService):
         """Business logic for interacting with AI agents and LLM configurations."""
     
    +    def build_context_blocks(self, raw_bytes: bytes, cmd_byte_positions: list, node_info: dict) -> list:
    +        """Identifies command blocks in the terminal history."""
    +        blocks = []
    +        if not (cmd_byte_positions and len(cmd_byte_positions) >= 2 and raw_bytes):
    +            return blocks
    +            
    +        default_prompt = r'>$|#$|\$$|>.$|#.$|\$.$'
    +        device_prompt = node_info.get("prompt", default_prompt) if isinstance(node_info, dict) else default_prompt
    +        prompt_re_str = re.sub(r'(?<!\\)\$', '', device_prompt)
    +        try:
    +            prompt_re = re.compile(prompt_re_str)
    +        except Exception:
    +            prompt_re = re.compile(re.sub(r'(?<!\\)\$', '', default_prompt))
    +            
    +        for i in range(1, len(cmd_byte_positions)):
    +            pos, known_cmd = cmd_byte_positions[i]
    +            prev_pos = cmd_byte_positions[i-1][0]
    +            
    +            if known_cmd:
    +                prev_chunk = raw_bytes[prev_pos:pos]
    +                prev_cleaned = log_cleaner(prev_chunk.decode(errors='replace'))
    +                prev_lines = [l for l in prev_cleaned.split('\n') if l.strip()]
    +                prompt_text = prev_lines[-1].strip() if prev_lines else ""
    +                preview = f"{prompt_text}{known_cmd}" if prompt_text else known_cmd
    +                blocks.append((pos, preview[:80]))
    +            else:
    +                chunk = raw_bytes[prev_pos:pos]
    +                cleaned = log_cleaner(chunk.decode(errors='replace'))
    +                lines = [l for l in cleaned.split('\n') if l.strip()]
    +                preview = lines[-1].strip() if lines else ""
    +                
    +                if preview:
    +                    match = prompt_re.search(preview)
    +                    if match:
    +                        cmd_text = preview[match.end():].strip()
    +                        if cmd_text:
    +                            blocks.append((pos, preview[:80]))
    +        return blocks
    +
    +    def process_copilot_input(self, input_text: str, session_state: dict) -> dict:
    +        """Parses slash commands and manages session state. Returns directive dict."""
    +        text = input_text.strip()
    +        if not text.startswith('/'):
    +            return {"action": "execute", "clean_prompt": text, "overrides": {}}
    +            
    +        parts = text.split(maxsplit=1)
    +        cmd = parts[0].lower()
    +        args = parts[1] if len(parts) > 1 else ""
    +        
    +        # 1. State Commands (Persistent)
    +        if cmd == "/os":
    +            if args:
    +                session_state['os'] = args
    +                return {"action": "state_update", "message": f"OS context changed to {args}"}
    +        elif cmd == "/prompt":
    +            if args:
    +                session_state['prompt'] = args
    +                return {"action": "state_update", "message": f"Prompt regex changed to {args}"}
    +        elif cmd == "/memorize":
    +            if args:
    +                session_state['memories'].append(args)
    +                return {"action": "state_update", "message": f"Memory added: {args}"}
    +        elif cmd == "/clear":
    +            session_state['memories'] = []
    +            return {"action": "state_update", "message": "Memory cleared"}
    +            
    +        # 2. Hybrid Commands
    +        elif cmd == "/architect":
    +            if not args:
    +                session_state['persona'] = 'architect'
    +                return {"action": "state_update", "message": "Persona set to Architect"}
    +            else:
    +                return {"action": "execute", "clean_prompt": args, "overrides": {"persona": "architect"}}
    +                
    +        elif cmd == "/engineer":
    +            if not args:
    +                session_state['persona'] = 'engineer'
    +                return {"action": "state_update", "message": "Persona set to Engineer"}
    +            else:
    +                return {"action": "execute", "clean_prompt": args, "overrides": {"persona": "engineer"}}
    +                
    +        elif cmd == "/trust":
    +            if not args:
    +                session_state['trust_mode'] = True
    +                return {"action": "state_update", "message": "Auto-execute (trust) enabled for session"}
    +            else:
    +                return {"action": "execute", "clean_prompt": args, "overrides": {"trust": True}}
    +                
    +        elif cmd == "/untrust":
    +            if not args:
    +                session_state['trust_mode'] = False
    +                return {"action": "state_update", "message": "Auto-execute (trust) disabled for session"}
    +            else:
    +                return {"action": "execute", "clean_prompt": args, "overrides": {"trust": False}}
    +
    +        # Unknown command, execute normally
    +        return {"action": "execute", "clean_prompt": text, "overrides": {}}
    +
         def ask(self, input_text, dryrun=False, chat_history=None, status=None, debug=False, session_id=None, console=None, chunk_callback=None, confirm_handler=None, trust=False, **overrides):
             """Send a prompt to the AI agent."""
             from connpy.ai import ai
    @@ -71,6 +169,21 @@ el.replaceWith(d);
             agent = ai(self.config, console=console)
             return agent.confirm(input_text)
     
    +    def ask_copilot(self, terminal_buffer, user_question, node_info=None, chunk_callback=None):
    +        """Ask the AI copilot for terminal assistance."""
    +        from connpy.ai import ai, run_ai_async
    +        agent = ai(self.config)
    +        future = run_ai_async(agent.aask_copilot(terminal_buffer, user_question, node_info, chunk_callback=chunk_callback))
    +        return future.result()
    +
    +    async def aask_copilot(self, terminal_buffer, user_question, node_info=None, chunk_callback=None):
    +        """Ask the AI copilot for terminal assistance asynchronously."""
    +        from connpy.ai import ai, run_ai_async
    +        import asyncio
    +        agent = ai(self.config)
    +        future = run_ai_async(agent.aask_copilot(terminal_buffer, user_question, node_info, chunk_callback=chunk_callback))
    +        return await asyncio.wrap_future(future)
    +
     
         def list_sessions(self):
             """Return a list of all saved AI sessions."""
    @@ -99,6 +212,40 @@ el.replaceWith(d);
             self.config.config["ai"] = settings
             self.config._saveconfig(self.config.file)
     
    +    def configure_mcp(self, name, url=None, enabled=None, auto_load_on_os=None, remove=False):
    +        """Update MCP server settings in the configuration with smart merging."""
    +        ai_settings = self.config.config.get("ai", {})
    +        mcp_servers = ai_settings.get("mcp_servers", {})
    +        
    +        if remove:
    +            if name in mcp_servers:
    +                del mcp_servers[name]
    +        else:
    +            # Get existing or new
    +            server_cfg = mcp_servers.get(name, {})
    +            
    +            # Partial updates
    +            if url is not None:
    +                server_cfg["url"] = url
    +            
    +            if enabled is not None:
    +                server_cfg["enabled"] = bool(enabled)
    +            elif "enabled" not in server_cfg:
    +                server_cfg["enabled"] = True # Default for new entries
    +                
    +            if auto_load_on_os is not None:
    +                if auto_load_on_os == "": # Explicit clear
    +                    if "auto_load_on_os" in server_cfg:
    +                        del server_cfg["auto_load_on_os"]
    +                else:
    +                    server_cfg["auto_load_on_os"] = auto_load_on_os
    +            
    +            mcp_servers[name] = server_cfg
    +            
    +        ai_settings["mcp_servers"] = mcp_servers
    +        self.config.config["ai"] = ai_settings
    +        self.config._saveconfig(self.config.file)
    +
         def load_session_data(self, session_id):
             """Load a session's raw data by ID."""
             from connpy.ai import ai
    @@ -118,6 +265,24 @@ el.replaceWith(d);
     
     

    Methods

    +
    +async def aask_copilot(self, terminal_buffer, user_question, node_info=None, chunk_callback=None) +
    +
    +
    + +Expand source code + +
    async def aask_copilot(self, terminal_buffer, user_question, node_info=None, chunk_callback=None):
    +    """Ask the AI copilot for terminal assistance asynchronously."""
    +    from connpy.ai import ai, run_ai_async
    +    import asyncio
    +    agent = ai(self.config)
    +    future = run_ai_async(agent.aask_copilot(terminal_buffer, user_question, node_info, chunk_callback=chunk_callback))
    +    return await asyncio.wrap_future(future)
    +
    +

    Ask the AI copilot for terminal assistance asynchronously.

    +
    def ask(self,
    input_text,
    dryrun=False,
    chat_history=None,
    status=None,
    debug=False,
    session_id=None,
    console=None,
    chunk_callback=None,
    confirm_handler=None,
    trust=False,
    **overrides)
    @@ -134,6 +299,116 @@ el.replaceWith(d);

    Send a prompt to the AI agent.

    +
    +def ask_copilot(self, terminal_buffer, user_question, node_info=None, chunk_callback=None) +
    +
    +
    + +Expand source code + +
    def ask_copilot(self, terminal_buffer, user_question, node_info=None, chunk_callback=None):
    +    """Ask the AI copilot for terminal assistance."""
    +    from connpy.ai import ai, run_ai_async
    +    agent = ai(self.config)
    +    future = run_ai_async(agent.aask_copilot(terminal_buffer, user_question, node_info, chunk_callback=chunk_callback))
    +    return future.result()
    +
    +

    Ask the AI copilot for terminal assistance.

    +
    +
    +def build_context_blocks(self, raw_bytes:Β bytes, cmd_byte_positions:Β list, node_info:Β dict) ‑>Β list +
    +
    +
    + +Expand source code + +
    def build_context_blocks(self, raw_bytes: bytes, cmd_byte_positions: list, node_info: dict) -> list:
    +    """Identifies command blocks in the terminal history."""
    +    blocks = []
    +    if not (cmd_byte_positions and len(cmd_byte_positions) >= 2 and raw_bytes):
    +        return blocks
    +        
    +    default_prompt = r'>$|#$|\$$|>.$|#.$|\$.$'
    +    device_prompt = node_info.get("prompt", default_prompt) if isinstance(node_info, dict) else default_prompt
    +    prompt_re_str = re.sub(r'(?<!\\)\$', '', device_prompt)
    +    try:
    +        prompt_re = re.compile(prompt_re_str)
    +    except Exception:
    +        prompt_re = re.compile(re.sub(r'(?<!\\)\$', '', default_prompt))
    +        
    +    for i in range(1, len(cmd_byte_positions)):
    +        pos, known_cmd = cmd_byte_positions[i]
    +        prev_pos = cmd_byte_positions[i-1][0]
    +        
    +        if known_cmd:
    +            prev_chunk = raw_bytes[prev_pos:pos]
    +            prev_cleaned = log_cleaner(prev_chunk.decode(errors='replace'))
    +            prev_lines = [l for l in prev_cleaned.split('\n') if l.strip()]
    +            prompt_text = prev_lines[-1].strip() if prev_lines else ""
    +            preview = f"{prompt_text}{known_cmd}" if prompt_text else known_cmd
    +            blocks.append((pos, preview[:80]))
    +        else:
    +            chunk = raw_bytes[prev_pos:pos]
    +            cleaned = log_cleaner(chunk.decode(errors='replace'))
    +            lines = [l for l in cleaned.split('\n') if l.strip()]
    +            preview = lines[-1].strip() if lines else ""
    +            
    +            if preview:
    +                match = prompt_re.search(preview)
    +                if match:
    +                    cmd_text = preview[match.end():].strip()
    +                    if cmd_text:
    +                        blocks.append((pos, preview[:80]))
    +    return blocks
    +
    +

    Identifies command blocks in the terminal history.

    +
    +
    +def configure_mcp(self, name, url=None, enabled=None, auto_load_on_os=None, remove=False) +
    +
    +
    + +Expand source code + +
    def configure_mcp(self, name, url=None, enabled=None, auto_load_on_os=None, remove=False):
    +    """Update MCP server settings in the configuration with smart merging."""
    +    ai_settings = self.config.config.get("ai", {})
    +    mcp_servers = ai_settings.get("mcp_servers", {})
    +    
    +    if remove:
    +        if name in mcp_servers:
    +            del mcp_servers[name]
    +    else:
    +        # Get existing or new
    +        server_cfg = mcp_servers.get(name, {})
    +        
    +        # Partial updates
    +        if url is not None:
    +            server_cfg["url"] = url
    +        
    +        if enabled is not None:
    +            server_cfg["enabled"] = bool(enabled)
    +        elif "enabled" not in server_cfg:
    +            server_cfg["enabled"] = True # Default for new entries
    +            
    +        if auto_load_on_os is not None:
    +            if auto_load_on_os == "": # Explicit clear
    +                if "auto_load_on_os" in server_cfg:
    +                    del server_cfg["auto_load_on_os"]
    +            else:
    +                server_cfg["auto_load_on_os"] = auto_load_on_os
    +        
    +        mcp_servers[name] = server_cfg
    +        
    +    ai_settings["mcp_servers"] = mcp_servers
    +    self.config.config["ai"] = ai_settings
    +    self.config._saveconfig(self.config.file)
    +
    +

    Update MCP server settings in the configuration with smart merging.

    +
    def configure_provider(self, provider, model=None, api_key=None)
    @@ -223,6 +498,75 @@ el.replaceWith(d);

    Load a session's raw data by ID.

    +
    +def process_copilot_input(self, input_text:Β str, session_state:Β dict) ‑>Β dict +
    +
    +
    + +Expand source code + +
    def process_copilot_input(self, input_text: str, session_state: dict) -> dict:
    +    """Parses slash commands and manages session state. Returns directive dict."""
    +    text = input_text.strip()
    +    if not text.startswith('/'):
    +        return {"action": "execute", "clean_prompt": text, "overrides": {}}
    +        
    +    parts = text.split(maxsplit=1)
    +    cmd = parts[0].lower()
    +    args = parts[1] if len(parts) > 1 else ""
    +    
    +    # 1. State Commands (Persistent)
    +    if cmd == "/os":
    +        if args:
    +            session_state['os'] = args
    +            return {"action": "state_update", "message": f"OS context changed to {args}"}
    +    elif cmd == "/prompt":
    +        if args:
    +            session_state['prompt'] = args
    +            return {"action": "state_update", "message": f"Prompt regex changed to {args}"}
    +    elif cmd == "/memorize":
    +        if args:
    +            session_state['memories'].append(args)
    +            return {"action": "state_update", "message": f"Memory added: {args}"}
    +    elif cmd == "/clear":
    +        session_state['memories'] = []
    +        return {"action": "state_update", "message": "Memory cleared"}
    +        
    +    # 2. Hybrid Commands
    +    elif cmd == "/architect":
    +        if not args:
    +            session_state['persona'] = 'architect'
    +            return {"action": "state_update", "message": "Persona set to Architect"}
    +        else:
    +            return {"action": "execute", "clean_prompt": args, "overrides": {"persona": "architect"}}
    +            
    +    elif cmd == "/engineer":
    +        if not args:
    +            session_state['persona'] = 'engineer'
    +            return {"action": "state_update", "message": "Persona set to Engineer"}
    +        else:
    +            return {"action": "execute", "clean_prompt": args, "overrides": {"persona": "engineer"}}
    +            
    +    elif cmd == "/trust":
    +        if not args:
    +            session_state['trust_mode'] = True
    +            return {"action": "state_update", "message": "Auto-execute (trust) enabled for session"}
    +        else:
    +            return {"action": "execute", "clean_prompt": args, "overrides": {"trust": True}}
    +            
    +    elif cmd == "/untrust":
    +        if not args:
    +            session_state['trust_mode'] = False
    +            return {"action": "state_update", "message": "Auto-execute (trust) disabled for session"}
    +        else:
    +            return {"action": "execute", "clean_prompt": args, "overrides": {"trust": False}}
    +
    +    # Unknown command, execute normally
    +    return {"action": "execute", "clean_prompt": text, "overrides": {}}
    +
    +

    Parses slash commands and manages session state. Returns directive dict.

    +

    Inherited members

      @@ -250,13 +594,18 @@ el.replaceWith(d);
      • AIService

        - @@ -265,7 +614,7 @@ el.replaceWith(d); diff --git a/docs/connpy/services/base.html b/docs/connpy/services/base.html index e72b7ab..2ff6902 100644 --- a/docs/connpy/services/base.html +++ b/docs/connpy/services/base.html @@ -3,7 +3,7 @@ - + connpy.services.base API documentation @@ -152,7 +152,7 @@ el.replaceWith(d); diff --git a/docs/connpy/services/config_service.html b/docs/connpy/services/config_service.html index df8016e..4156c44 100644 --- a/docs/connpy/services/config_service.html +++ b/docs/connpy/services/config_service.html @@ -3,7 +3,7 @@ - + connpy.services.config_service API documentation @@ -117,6 +117,10 @@ el.replaceWith(d); if not isinstance(user_styles, dict): raise InvalidConfigurationError("Theme file must be a YAML dictionary.") + # Support both direct styles and nested under 'theme' key + if "theme" in user_styles and isinstance(user_styles["theme"], dict): + user_styles = user_styles["theme"] + # Filter for valid styles only (prevent junk in config) valid_styles = {k: v for k, v in user_styles.items() if k in STYLES} @@ -174,6 +178,10 @@ el.replaceWith(d); if not isinstance(user_styles, dict): raise InvalidConfigurationError("Theme file must be a YAML dictionary.") + # Support both direct styles and nested under 'theme' key + if "theme" in user_styles and isinstance(user_styles["theme"], dict): + user_styles = user_styles["theme"] + # Filter for valid styles only (prevent junk in config) valid_styles = {k: v for k, v in user_styles.items() if k in STYLES} @@ -311,7 +319,7 @@ el.replaceWith(d); diff --git a/docs/connpy/services/context_service.html b/docs/connpy/services/context_service.html index 0a772f7..2161ebb 100644 --- a/docs/connpy/services/context_service.html +++ b/docs/connpy/services/context_service.html @@ -3,7 +3,7 @@ - + connpy.services.context_service API documentation @@ -370,7 +370,7 @@ def current_context(self) -> str: diff --git a/docs/connpy/services/exceptions.html b/docs/connpy/services/exceptions.html index 
459d464..164cec5 100644 --- a/docs/connpy/services/exceptions.html +++ b/docs/connpy/services/exceptions.html @@ -3,7 +3,7 @@ - + connpy.services.exceptions API documentation @@ -268,7 +268,7 @@ el.replaceWith(d); diff --git a/docs/connpy/services/execution_service.html b/docs/connpy/services/execution_service.html index 3740b1d..a29e4c6 100644 --- a/docs/connpy/services/execution_service.html +++ b/docs/connpy/services/execution_service.html @@ -3,7 +3,7 @@ - + connpy.services.execution_service API documentation @@ -64,7 +64,7 @@ el.replaceWith(d); commands: List[str], variables: Optional[Dict[str, Any]] = None, parallel: int = 10, - timeout: int = 10, + timeout: int = 20, folder: Optional[str] = None, prompt: Optional[str] = None, on_node_complete: Optional[Callable] = None, @@ -112,7 +112,7 @@ el.replaceWith(d); expected: List[str], variables: Optional[Dict[str, Any]] = None, parallel: int = 10, - timeout: int = 10, + timeout: int = 20, folder: Optional[str] = None, prompt: Optional[str] = None, on_node_complete: Optional[Callable] = None, @@ -189,7 +189,7 @@ el.replaceWith(d); "commands": playbook["commands"], "variables": playbook.get("variables"), "parallel": options.get("parallel", parallel), - "timeout": playbook.get("timeout", options.get("timeout", 10)), + "timeout": playbook.get("timeout", options.get("timeout", 20)), "prompt": options.get("prompt"), "name": playbook.get("name", "Task") } @@ -244,7 +244,7 @@ el.replaceWith(d);

        Run a plain-text script containing one command per line.

        -def run_commands(self,
        nodes_filter:Β str,
        commands:Β List[str],
        variables:Β Dict[str,Β Any]Β |Β NoneΒ =Β None,
        parallel:Β intΒ =Β 10,
        timeout:Β intΒ =Β 10,
        folder:Β strΒ |Β NoneΒ =Β None,
        prompt:Β strΒ |Β NoneΒ =Β None,
        on_node_complete:Β CallableΒ |Β NoneΒ =Β None,
        logger:Β CallableΒ |Β NoneΒ =Β None,
        name:Β strΒ |Β NoneΒ =Β None) ‑>Β Dict[str,Β str]
        +def run_commands(self,
        nodes_filter:Β str,
        commands:Β List[str],
        variables:Β Dict[str,Β Any]Β |Β NoneΒ =Β None,
        parallel:Β intΒ =Β 10,
        timeout:Β intΒ =Β 20,
        folder:Β strΒ |Β NoneΒ =Β None,
        prompt:Β strΒ |Β NoneΒ =Β None,
        on_node_complete:Β CallableΒ |Β NoneΒ =Β None,
        logger:Β CallableΒ |Β NoneΒ =Β None,
        name:Β strΒ |Β NoneΒ =Β None) ‑>Β Dict[str,Β str]
        @@ -257,7 +257,7 @@ el.replaceWith(d); commands: List[str], variables: Optional[Dict[str, Any]] = None, parallel: int = 10, - timeout: int = 10, + timeout: int = 20, folder: Optional[str] = None, prompt: Optional[str] = None, on_node_complete: Optional[Callable] = None, @@ -339,7 +339,7 @@ el.replaceWith(d); "commands": playbook["commands"], "variables": playbook.get("variables"), "parallel": options.get("parallel", parallel), - "timeout": playbook.get("timeout", options.get("timeout", 10)), + "timeout": playbook.get("timeout", options.get("timeout", 20)), "prompt": options.get("prompt"), "name": playbook.get("name", "Task") } @@ -360,7 +360,7 @@ el.replaceWith(d);

        Run a structured Connpy YAML automation playbook (from path or content).

        -def test_commands(self,
        nodes_filter:Β str,
        commands:Β List[str],
        expected:Β List[str],
        variables:Β Dict[str,Β Any]Β |Β NoneΒ =Β None,
        parallel:Β intΒ =Β 10,
        timeout:Β intΒ =Β 10,
        folder:Β strΒ |Β NoneΒ =Β None,
        prompt:Β strΒ |Β NoneΒ =Β None,
        on_node_complete:Β CallableΒ |Β NoneΒ =Β None,
        logger:Β CallableΒ |Β NoneΒ =Β None,
        name:Β strΒ |Β NoneΒ =Β None) ‑>Β Dict[str,Β Dict[str,Β bool]]
        +def test_commands(self,
        nodes_filter:Β str,
        commands:Β List[str],
        expected:Β List[str],
        variables:Β Dict[str,Β Any]Β |Β NoneΒ =Β None,
        parallel:Β intΒ =Β 10,
        timeout:Β intΒ =Β 20,
        folder:Β strΒ |Β NoneΒ =Β None,
        prompt:Β strΒ |Β NoneΒ =Β None,
        on_node_complete:Β CallableΒ |Β NoneΒ =Β None,
        logger:Β CallableΒ |Β NoneΒ =Β None,
        name:Β strΒ |Β NoneΒ =Β None) ‑>Β Dict[str,Β Dict[str,Β bool]]
        @@ -374,7 +374,7 @@ el.replaceWith(d); expected: List[str], variables: Optional[Dict[str, Any]] = None, parallel: int = 10, - timeout: int = 10, + timeout: int = 20, folder: Optional[str] = None, prompt: Optional[str] = None, on_node_complete: Optional[Callable] = None, @@ -449,7 +449,7 @@ el.replaceWith(d); diff --git a/docs/connpy/services/import_export_service.html b/docs/connpy/services/import_export_service.html index 58e0736..4c98e1b 100644 --- a/docs/connpy/services/import_export_service.html +++ b/docs/connpy/services/import_export_service.html @@ -3,7 +3,7 @@ - + connpy.services.import_export_service API documentation @@ -361,7 +361,7 @@ el.replaceWith(d); diff --git a/docs/connpy/services/index.html b/docs/connpy/services/index.html index e77a80a..8b626a3 100644 --- a/docs/connpy/services/index.html +++ b/docs/connpy/services/index.html @@ -3,7 +3,7 @@ - + connpy.services API documentation @@ -113,6 +113,104 @@ el.replaceWith(d);
        class AIService(BaseService):
             """Business logic for interacting with AI agents and LLM configurations."""
         
        +    def build_context_blocks(self, raw_bytes: bytes, cmd_byte_positions: list, node_info: dict) -> list:
        +        """Identifies command blocks in the terminal history."""
        +        blocks = []
        +        if not (cmd_byte_positions and len(cmd_byte_positions) >= 2 and raw_bytes):
        +            return blocks
        +            
        +        default_prompt = r'>$|#$|\$$|>.$|#.$|\$.$'
        +        device_prompt = node_info.get("prompt", default_prompt) if isinstance(node_info, dict) else default_prompt
        +        prompt_re_str = re.sub(r'(?<!\\)\$', '', device_prompt)
        +        try:
        +            prompt_re = re.compile(prompt_re_str)
        +        except Exception:
        +            prompt_re = re.compile(re.sub(r'(?<!\\)\$', '', default_prompt))
        +            
        +        for i in range(1, len(cmd_byte_positions)):
        +            pos, known_cmd = cmd_byte_positions[i]
        +            prev_pos = cmd_byte_positions[i-1][0]
        +            
        +            if known_cmd:
        +                prev_chunk = raw_bytes[prev_pos:pos]
        +                prev_cleaned = log_cleaner(prev_chunk.decode(errors='replace'))
        +                prev_lines = [l for l in prev_cleaned.split('\n') if l.strip()]
        +                prompt_text = prev_lines[-1].strip() if prev_lines else ""
        +                preview = f"{prompt_text}{known_cmd}" if prompt_text else known_cmd
        +                blocks.append((pos, preview[:80]))
        +            else:
        +                chunk = raw_bytes[prev_pos:pos]
        +                cleaned = log_cleaner(chunk.decode(errors='replace'))
        +                lines = [l for l in cleaned.split('\n') if l.strip()]
        +                preview = lines[-1].strip() if lines else ""
        +                
        +                if preview:
        +                    match = prompt_re.search(preview)
        +                    if match:
        +                        cmd_text = preview[match.end():].strip()
        +                        if cmd_text:
        +                            blocks.append((pos, preview[:80]))
        +        return blocks
        +
        +    def process_copilot_input(self, input_text: str, session_state: dict) -> dict:
        +        """Parses slash commands and manages session state. Returns directive dict."""
        +        text = input_text.strip()
        +        if not text.startswith('/'):
        +            return {"action": "execute", "clean_prompt": text, "overrides": {}}
        +            
        +        parts = text.split(maxsplit=1)
        +        cmd = parts[0].lower()
        +        args = parts[1] if len(parts) > 1 else ""
        +        
        +        # 1. State Commands (Persistent)
        +        if cmd == "/os":
        +            if args:
        +                session_state['os'] = args
        +                return {"action": "state_update", "message": f"OS context changed to {args}"}
        +        elif cmd == "/prompt":
        +            if args:
        +                session_state['prompt'] = args
        +                return {"action": "state_update", "message": f"Prompt regex changed to {args}"}
        +        elif cmd == "/memorize":
        +            if args:
        +                session_state['memories'].append(args)
        +                return {"action": "state_update", "message": f"Memory added: {args}"}
        +        elif cmd == "/clear":
        +            session_state['memories'] = []
        +            return {"action": "state_update", "message": "Memory cleared"}
        +            
        +        # 2. Hybrid Commands
        +        elif cmd == "/architect":
        +            if not args:
        +                session_state['persona'] = 'architect'
        +                return {"action": "state_update", "message": "Persona set to Architect"}
        +            else:
        +                return {"action": "execute", "clean_prompt": args, "overrides": {"persona": "architect"}}
        +                
        +        elif cmd == "/engineer":
        +            if not args:
        +                session_state['persona'] = 'engineer'
        +                return {"action": "state_update", "message": "Persona set to Engineer"}
        +            else:
        +                return {"action": "execute", "clean_prompt": args, "overrides": {"persona": "engineer"}}
        +                
        +        elif cmd == "/trust":
        +            if not args:
        +                session_state['trust_mode'] = True
        +                return {"action": "state_update", "message": "Auto-execute (trust) enabled for session"}
        +            else:
        +                return {"action": "execute", "clean_prompt": args, "overrides": {"trust": True}}
        +                
        +        elif cmd == "/untrust":
        +            if not args:
        +                session_state['trust_mode'] = False
        +                return {"action": "state_update", "message": "Auto-execute (trust) disabled for session"}
        +            else:
        +                return {"action": "execute", "clean_prompt": args, "overrides": {"trust": False}}
        +
        +        # Unknown command, execute normally
        +        return {"action": "execute", "clean_prompt": text, "overrides": {}}
        +
             def ask(self, input_text, dryrun=False, chat_history=None, status=None, debug=False, session_id=None, console=None, chunk_callback=None, confirm_handler=None, trust=False, **overrides):
                 """Send a prompt to the AI agent."""
                 from connpy.ai import ai
        @@ -126,6 +224,21 @@ el.replaceWith(d);
                 agent = ai(self.config, console=console)
                 return agent.confirm(input_text)
         
        +    def ask_copilot(self, terminal_buffer, user_question, node_info=None, chunk_callback=None):
        +        """Ask the AI copilot for terminal assistance."""
        +        from connpy.ai import ai, run_ai_async
        +        agent = ai(self.config)
        +        future = run_ai_async(agent.aask_copilot(terminal_buffer, user_question, node_info, chunk_callback=chunk_callback))
        +        return future.result()
        +
        +    async def aask_copilot(self, terminal_buffer, user_question, node_info=None, chunk_callback=None):
        +        """Ask the AI copilot for terminal assistance asynchronously."""
        +        from connpy.ai import ai, run_ai_async
        +        import asyncio
        +        agent = ai(self.config)
        +        future = run_ai_async(agent.aask_copilot(terminal_buffer, user_question, node_info, chunk_callback=chunk_callback))
        +        return await asyncio.wrap_future(future)
        +
         
             def list_sessions(self):
                 """Return a list of all saved AI sessions."""
        @@ -154,6 +267,40 @@ el.replaceWith(d);
                 self.config.config["ai"] = settings
                 self.config._saveconfig(self.config.file)
         
        +    def configure_mcp(self, name, url=None, enabled=None, auto_load_on_os=None, remove=False):
        +        """Update MCP server settings in the configuration with smart merging."""
        +        ai_settings = self.config.config.get("ai", {})
        +        mcp_servers = ai_settings.get("mcp_servers", {})
        +        
        +        if remove:
        +            if name in mcp_servers:
        +                del mcp_servers[name]
        +        else:
        +            # Get existing or new
        +            server_cfg = mcp_servers.get(name, {})
        +            
        +            # Partial updates
        +            if url is not None:
        +                server_cfg["url"] = url
        +            
        +            if enabled is not None:
        +                server_cfg["enabled"] = bool(enabled)
        +            elif "enabled" not in server_cfg:
        +                server_cfg["enabled"] = True # Default for new entries
        +                
        +            if auto_load_on_os is not None:
        +                if auto_load_on_os == "": # Explicit clear
        +                    if "auto_load_on_os" in server_cfg:
        +                        del server_cfg["auto_load_on_os"]
        +                else:
        +                    server_cfg["auto_load_on_os"] = auto_load_on_os
        +            
        +            mcp_servers[name] = server_cfg
        +            
        +        ai_settings["mcp_servers"] = mcp_servers
        +        self.config.config["ai"] = ai_settings
        +        self.config._saveconfig(self.config.file)
        +
             def load_session_data(self, session_id):
                 """Load a session's raw data by ID."""
                 from connpy.ai import ai
        @@ -173,6 +320,24 @@ el.replaceWith(d);
         

      Methods

      +
      +async def aask_copilot(self, terminal_buffer, user_question, node_info=None, chunk_callback=None) +
      +
      +
      + +Expand source code + +
      async def aask_copilot(self, terminal_buffer, user_question, node_info=None, chunk_callback=None):
      +    """Ask the AI copilot for terminal assistance asynchronously."""
      +    from connpy.ai import ai, run_ai_async
      +    import asyncio
      +    agent = ai(self.config)
      +    future = run_ai_async(agent.aask_copilot(terminal_buffer, user_question, node_info, chunk_callback=chunk_callback))
      +    return await asyncio.wrap_future(future)
      +
      +

      Ask the AI copilot for terminal assistance asynchronously.

      +
      def ask(self,
      input_text,
      dryrun=False,
      chat_history=None,
      status=None,
      debug=False,
      session_id=None,
      console=None,
      chunk_callback=None,
      confirm_handler=None,
      trust=False,
      **overrides)
      @@ -189,6 +354,116 @@ el.replaceWith(d);

      Send a prompt to the AI agent.

      +
      +def ask_copilot(self, terminal_buffer, user_question, node_info=None, chunk_callback=None) +
      +
      +
      + +Expand source code + +
      def ask_copilot(self, terminal_buffer, user_question, node_info=None, chunk_callback=None):
      +    """Ask the AI copilot for terminal assistance."""
      +    from connpy.ai import ai, run_ai_async
      +    agent = ai(self.config)
      +    future = run_ai_async(agent.aask_copilot(terminal_buffer, user_question, node_info, chunk_callback=chunk_callback))
      +    return future.result()
      +
      +

      Ask the AI copilot for terminal assistance.

      +
      +
      +def build_context_blocks(self, raw_bytes:Β bytes, cmd_byte_positions:Β list, node_info:Β dict) ‑>Β list +
      +
      +
      + +Expand source code + +
      def build_context_blocks(self, raw_bytes: bytes, cmd_byte_positions: list, node_info: dict) -> list:
      +    """Identifies command blocks in the terminal history."""
      +    blocks = []
      +    if not (cmd_byte_positions and len(cmd_byte_positions) >= 2 and raw_bytes):
      +        return blocks
      +        
      +    default_prompt = r'>$|#$|\$$|>.$|#.$|\$.$'
      +    device_prompt = node_info.get("prompt", default_prompt) if isinstance(node_info, dict) else default_prompt
      +    prompt_re_str = re.sub(r'(?<!\\)\$', '', device_prompt)
      +    try:
      +        prompt_re = re.compile(prompt_re_str)
      +    except Exception:
      +        prompt_re = re.compile(re.sub(r'(?<!\\)\$', '', default_prompt))
      +        
      +    for i in range(1, len(cmd_byte_positions)):
      +        pos, known_cmd = cmd_byte_positions[i]
      +        prev_pos = cmd_byte_positions[i-1][0]
      +        
      +        if known_cmd:
      +            prev_chunk = raw_bytes[prev_pos:pos]
      +            prev_cleaned = log_cleaner(prev_chunk.decode(errors='replace'))
      +            prev_lines = [l for l in prev_cleaned.split('\n') if l.strip()]
      +            prompt_text = prev_lines[-1].strip() if prev_lines else ""
      +            preview = f"{prompt_text}{known_cmd}" if prompt_text else known_cmd
      +            blocks.append((pos, preview[:80]))
      +        else:
      +            chunk = raw_bytes[prev_pos:pos]
      +            cleaned = log_cleaner(chunk.decode(errors='replace'))
      +            lines = [l for l in cleaned.split('\n') if l.strip()]
      +            preview = lines[-1].strip() if lines else ""
      +            
      +            if preview:
      +                match = prompt_re.search(preview)
      +                if match:
      +                    cmd_text = preview[match.end():].strip()
      +                    if cmd_text:
      +                        blocks.append((pos, preview[:80]))
      +    return blocks
      +
      +

      Identifies command blocks in the terminal history.

      +
      +
      +def configure_mcp(self, name, url=None, enabled=None, auto_load_on_os=None, remove=False) +
      +
      +
      + +Expand source code + +
      def configure_mcp(self, name, url=None, enabled=None, auto_load_on_os=None, remove=False):
      +    """Update MCP server settings in the configuration with smart merging."""
      +    ai_settings = self.config.config.get("ai", {})
      +    mcp_servers = ai_settings.get("mcp_servers", {})
      +    
      +    if remove:
      +        if name in mcp_servers:
      +            del mcp_servers[name]
      +    else:
      +        # Get existing or new
      +        server_cfg = mcp_servers.get(name, {})
      +        
      +        # Partial updates
      +        if url is not None:
      +            server_cfg["url"] = url
      +        
      +        if enabled is not None:
      +            server_cfg["enabled"] = bool(enabled)
      +        elif "enabled" not in server_cfg:
      +            server_cfg["enabled"] = True # Default for new entries
      +            
      +        if auto_load_on_os is not None:
      +            if auto_load_on_os == "": # Explicit clear
      +                if "auto_load_on_os" in server_cfg:
      +                    del server_cfg["auto_load_on_os"]
      +            else:
      +                server_cfg["auto_load_on_os"] = auto_load_on_os
      +        
      +        mcp_servers[name] = server_cfg
      +        
      +    ai_settings["mcp_servers"] = mcp_servers
      +    self.config.config["ai"] = ai_settings
      +    self.config._saveconfig(self.config.file)
      +
      +

      Update MCP server settings in the configuration with smart merging.

      +
      def configure_provider(self, provider, model=None, api_key=None)
      @@ -278,6 +553,75 @@ el.replaceWith(d);

      Load a session's raw data by ID.

      +
      +def process_copilot_input(self, input_text:Β str, session_state:Β dict) ‑>Β dict +
      +
      +
      + +Expand source code + +
      def process_copilot_input(self, input_text: str, session_state: dict) -> dict:
      +    """Parses slash commands and manages session state. Returns directive dict."""
      +    text = input_text.strip()
      +    if not text.startswith('/'):
      +        return {"action": "execute", "clean_prompt": text, "overrides": {}}
      +        
      +    parts = text.split(maxsplit=1)
      +    cmd = parts[0].lower()
      +    args = parts[1] if len(parts) > 1 else ""
      +    
      +    # 1. State Commands (Persistent)
      +    if cmd == "/os":
      +        if args:
      +            session_state['os'] = args
      +            return {"action": "state_update", "message": f"OS context changed to {args}"}
      +    elif cmd == "/prompt":
      +        if args:
      +            session_state['prompt'] = args
      +            return {"action": "state_update", "message": f"Prompt regex changed to {args}"}
      +    elif cmd == "/memorize":
      +        if args:
      +            session_state['memories'].append(args)
      +            return {"action": "state_update", "message": f"Memory added: {args}"}
      +    elif cmd == "/clear":
      +        session_state['memories'] = []
      +        return {"action": "state_update", "message": "Memory cleared"}
      +        
      +    # 2. Hybrid Commands
      +    elif cmd == "/architect":
      +        if not args:
      +            session_state['persona'] = 'architect'
      +            return {"action": "state_update", "message": "Persona set to Architect"}
      +        else:
      +            return {"action": "execute", "clean_prompt": args, "overrides": {"persona": "architect"}}
      +            
      +    elif cmd == "/engineer":
      +        if not args:
      +            session_state['persona'] = 'engineer'
      +            return {"action": "state_update", "message": "Persona set to Engineer"}
      +        else:
      +            return {"action": "execute", "clean_prompt": args, "overrides": {"persona": "engineer"}}
      +            
      +    elif cmd == "/trust":
      +        if not args:
      +            session_state['trust_mode'] = True
      +            return {"action": "state_update", "message": "Auto-execute (trust) enabled for session"}
      +        else:
      +            return {"action": "execute", "clean_prompt": args, "overrides": {"trust": True}}
      +            
      +    elif cmd == "/untrust":
      +        if not args:
      +            session_state['trust_mode'] = False
      +            return {"action": "state_update", "message": "Auto-execute (trust) disabled for session"}
      +        else:
      +            return {"action": "execute", "clean_prompt": args, "overrides": {"trust": False}}
      +
      +    # Unknown command, execute normally
      +    return {"action": "execute", "clean_prompt": text, "overrides": {}}
      +
      +

      Parses slash commands and manages session state. Returns directive dict.

      +

      Inherited members

        @@ -359,6 +703,10 @@ el.replaceWith(d); if not isinstance(user_styles, dict): raise InvalidConfigurationError("Theme file must be a YAML dictionary.") + # Support both direct styles and nested under 'theme' key + if "theme" in user_styles and isinstance(user_styles["theme"], dict): + user_styles = user_styles["theme"] + # Filter for valid styles only (prevent junk in config) valid_styles = {k: v for k, v in user_styles.items() if k in STYLES} @@ -416,6 +764,10 @@ el.replaceWith(d); if not isinstance(user_styles, dict): raise InvalidConfigurationError("Theme file must be a YAML dictionary.") + # Support both direct styles and nested under 'theme' key + if "theme" in user_styles and isinstance(user_styles["theme"], dict): + user_styles = user_styles["theme"] + # Filter for valid styles only (prevent junk in config) valid_styles = {k: v for k, v in user_styles.items() if k in STYLES} @@ -590,7 +942,7 @@ el.replaceWith(d); commands: List[str], variables: Optional[Dict[str, Any]] = None, parallel: int = 10, - timeout: int = 10, + timeout: int = 20, folder: Optional[str] = None, prompt: Optional[str] = None, on_node_complete: Optional[Callable] = None, @@ -638,7 +990,7 @@ el.replaceWith(d); expected: List[str], variables: Optional[Dict[str, Any]] = None, parallel: int = 10, - timeout: int = 10, + timeout: int = 20, folder: Optional[str] = None, prompt: Optional[str] = None, on_node_complete: Optional[Callable] = None, @@ -715,7 +1067,7 @@ el.replaceWith(d); "commands": playbook["commands"], "variables": playbook.get("variables"), "parallel": options.get("parallel", parallel), - "timeout": playbook.get("timeout", options.get("timeout", 10)), + "timeout": playbook.get("timeout", options.get("timeout", 20)), "prompt": options.get("prompt"), "name": playbook.get("name", "Task") } @@ -770,7 +1122,7 @@ el.replaceWith(d);

        Run a plain-text script containing one command per line.

        -def run_commands(self,
        nodes_filter:Β str,
        commands:Β List[str],
        variables:Β Dict[str,Β Any]Β |Β NoneΒ =Β None,
        parallel:Β intΒ =Β 10,
        timeout:Β intΒ =Β 10,
        folder:Β strΒ |Β NoneΒ =Β None,
        prompt:Β strΒ |Β NoneΒ =Β None,
        on_node_complete:Β CallableΒ |Β NoneΒ =Β None,
        logger:Β CallableΒ |Β NoneΒ =Β None,
        name:Β strΒ |Β NoneΒ =Β None) ‑>Β Dict[str,Β str]
        +def run_commands(self,
        nodes_filter:Β str,
        commands:Β List[str],
        variables:Β Dict[str,Β Any]Β |Β NoneΒ =Β None,
        parallel:Β intΒ =Β 10,
        timeout:Β intΒ =Β 20,
        folder:Β strΒ |Β NoneΒ =Β None,
        prompt:Β strΒ |Β NoneΒ =Β None,
        on_node_complete:Β CallableΒ |Β NoneΒ =Β None,
        logger:Β CallableΒ |Β NoneΒ =Β None,
        name:Β strΒ |Β NoneΒ =Β None) ‑>Β Dict[str,Β str]
        @@ -783,7 +1135,7 @@ el.replaceWith(d); commands: List[str], variables: Optional[Dict[str, Any]] = None, parallel: int = 10, - timeout: int = 10, + timeout: int = 20, folder: Optional[str] = None, prompt: Optional[str] = None, on_node_complete: Optional[Callable] = None, @@ -865,7 +1217,7 @@ el.replaceWith(d); "commands": playbook["commands"], "variables": playbook.get("variables"), "parallel": options.get("parallel", parallel), - "timeout": playbook.get("timeout", options.get("timeout", 10)), + "timeout": playbook.get("timeout", options.get("timeout", 20)), "prompt": options.get("prompt"), "name": playbook.get("name", "Task") } @@ -886,7 +1238,7 @@ el.replaceWith(d);

        Run a structured Connpy YAML automation playbook (from path or content).

        -def test_commands(self,
        nodes_filter:Β str,
        commands:Β List[str],
        expected:Β List[str],
        variables:Β Dict[str,Β Any]Β |Β NoneΒ =Β None,
        parallel:Β intΒ =Β 10,
        timeout:Β intΒ =Β 10,
        folder:Β strΒ |Β NoneΒ =Β None,
        prompt:Β strΒ |Β NoneΒ =Β None,
        on_node_complete:Β CallableΒ |Β NoneΒ =Β None,
        logger:Β CallableΒ |Β NoneΒ =Β None,
        name:Β strΒ |Β NoneΒ =Β None) ‑>Β Dict[str,Β Dict[str,Β bool]]
        +def test_commands(self,
        nodes_filter:Β str,
        commands:Β List[str],
        expected:Β List[str],
        variables:Β Dict[str,Β Any]Β |Β NoneΒ =Β None,
        parallel:Β intΒ =Β 10,
        timeout:Β intΒ =Β 20,
        folder:Β strΒ |Β NoneΒ =Β None,
        prompt:Β strΒ |Β NoneΒ =Β None,
        on_node_complete:Β CallableΒ |Β NoneΒ =Β None,
        logger:Β CallableΒ |Β NoneΒ =Β None,
        name:Β strΒ |Β NoneΒ =Β None) ‑>Β Dict[str,Β Dict[str,Β bool]]
        @@ -900,7 +1252,7 @@ el.replaceWith(d); expected: List[str], variables: Optional[Dict[str, Any]] = None, parallel: int = 10, - timeout: int = 10, + timeout: int = 20, folder: Optional[str] = None, prompt: Optional[str] = None, on_node_complete: Optional[Callable] = None, @@ -2231,28 +2583,47 @@ el.replaceWith(d); from rich.console import Console from rich.console import Console - buf = io.StringIO() + import queue + import threading + + q = queue.Queue() + + class QueueIO(io.StringIO): + def write(self, s): + q.put(s) + return len(s) + def flush(self): + pass + + buf = QueueIO() old_console = printer._get_console() old_err_console = printer._get_err_console() - printer.set_thread_console(Console(file=buf, theme=printer.connpy_theme, force_terminal=True)) - printer.set_thread_err_console(Console(file=buf, theme=printer.connpy_theme, force_terminal=True)) - printer.set_thread_stream(buf) + def run_plugin(): + printer.set_thread_console(Console(file=buf, theme=printer.connpy_theme, force_terminal=True)) + printer.set_thread_err_console(Console(file=buf, theme=printer.connpy_theme, force_terminal=True)) + printer.set_thread_stream(buf) + try: + if hasattr(module, "Entrypoint"): + module.Entrypoint(args, parser, app) + except BaseException as e: + if not isinstance(e, SystemExit): + import traceback + printer.err_console.print(traceback.format_exc()) + finally: + printer.set_thread_console(old_console) + printer.set_thread_err_console(old_err_console) + printer.set_thread_stream(None) + q.put(None) + + t = threading.Thread(target=run_plugin, daemon=True) + t.start() - try: - if hasattr(module, "Entrypoint"): - module.Entrypoint(args, parser, app) - except BaseException as e: - if not isinstance(e, SystemExit): - import traceback - printer.err_console.print(traceback.format_exc()) - finally: - printer.set_thread_console(old_console) - printer.set_thread_err_console(old_err_console) - printer.set_thread_stream(None) - - for line in 
buf.getvalue().splitlines(keepends=True): - yield line
    + while True: + item = q.get() + if item is None: + break + yield item

    Business logic for enabling, disabling, and listing plugins.

    Initialize the service.

    @@ -2507,28 +2878,47 @@ el.replaceWith(d); from rich.console import Console from rich.console import Console - buf = io.StringIO() + import queue + import threading + + q = queue.Queue() + + class QueueIO(io.StringIO): + def write(self, s): + q.put(s) + return len(s) + def flush(self): + pass + + buf = QueueIO() old_console = printer._get_console() old_err_console = printer._get_err_console() - printer.set_thread_console(Console(file=buf, theme=printer.connpy_theme, force_terminal=True)) - printer.set_thread_err_console(Console(file=buf, theme=printer.connpy_theme, force_terminal=True)) - printer.set_thread_stream(buf) + def run_plugin(): + printer.set_thread_console(Console(file=buf, theme=printer.connpy_theme, force_terminal=True)) + printer.set_thread_err_console(Console(file=buf, theme=printer.connpy_theme, force_terminal=True)) + printer.set_thread_stream(buf) + try: + if hasattr(module, "Entrypoint"): + module.Entrypoint(args, parser, app) + except BaseException as e: + if not isinstance(e, SystemExit): + import traceback + printer.err_console.print(traceback.format_exc()) + finally: + printer.set_thread_console(old_console) + printer.set_thread_err_console(old_err_console) + printer.set_thread_stream(None) + q.put(None) + + t = threading.Thread(target=run_plugin, daemon=True) + t.start() - try: - if hasattr(module, "Entrypoint"): - module.Entrypoint(args, parser, app) - except BaseException as e: - if not isinstance(e, SystemExit): - import traceback - printer.err_console.print(traceback.format_exc()) - finally: - printer.set_thread_console(old_console) - printer.set_thread_err_console(old_err_console) - printer.set_thread_stream(None) - - for line in buf.getvalue().splitlines(keepends=True): - yield line + while True: + item = q.get() + if item is None: + break + yield item
    @@ -3259,13 +3649,18 @@ el.replaceWith(d);
    • AIService

      -
        +
      • @@ -3377,7 +3772,7 @@ el.replaceWith(d); diff --git a/docs/connpy/services/node_service.html b/docs/connpy/services/node_service.html index 0a66d09..1700265 100644 --- a/docs/connpy/services/node_service.html +++ b/docs/connpy/services/node_service.html @@ -3,7 +3,7 @@ - + connpy.services.node_service API documentation @@ -786,7 +786,7 @@ el.replaceWith(d); diff --git a/docs/connpy/services/plugin_service.html b/docs/connpy/services/plugin_service.html index f9e7ddb..c99a7f7 100644 --- a/docs/connpy/services/plugin_service.html +++ b/docs/connpy/services/plugin_service.html @@ -3,7 +3,7 @@ - + connpy.services.plugin_service API documentation @@ -284,28 +284,47 @@ el.replaceWith(d); from rich.console import Console from rich.console import Console - buf = io.StringIO() + import queue + import threading + + q = queue.Queue() + + class QueueIO(io.StringIO): + def write(self, s): + q.put(s) + return len(s) + def flush(self): + pass + + buf = QueueIO() old_console = printer._get_console() old_err_console = printer._get_err_console() - printer.set_thread_console(Console(file=buf, theme=printer.connpy_theme, force_terminal=True)) - printer.set_thread_err_console(Console(file=buf, theme=printer.connpy_theme, force_terminal=True)) - printer.set_thread_stream(buf) + def run_plugin(): + printer.set_thread_console(Console(file=buf, theme=printer.connpy_theme, force_terminal=True)) + printer.set_thread_err_console(Console(file=buf, theme=printer.connpy_theme, force_terminal=True)) + printer.set_thread_stream(buf) + try: + if hasattr(module, "Entrypoint"): + module.Entrypoint(args, parser, app) + except BaseException as e: + if not isinstance(e, SystemExit): + import traceback + printer.err_console.print(traceback.format_exc()) + finally: + printer.set_thread_console(old_console) + printer.set_thread_err_console(old_err_console) + printer.set_thread_stream(None) + q.put(None) + + t = threading.Thread(target=run_plugin, daemon=True) + t.start() - try: - if hasattr(module, 
"Entrypoint"): - module.Entrypoint(args, parser, app) - except BaseException as e: - if not isinstance(e, SystemExit): - import traceback - printer.err_console.print(traceback.format_exc()) - finally: - printer.set_thread_console(old_console) - printer.set_thread_err_console(old_err_console) - printer.set_thread_stream(None) - - for line in buf.getvalue().splitlines(keepends=True): - yield line + while True: + item = q.get() + if item is None: + break + yield item

        Business logic for enabling, disabling, and listing plugins.

        Initialize the service.

        @@ -560,28 +579,47 @@ el.replaceWith(d); from rich.console import Console from rich.console import Console - buf = io.StringIO() + import queue + import threading + + q = queue.Queue() + + class QueueIO(io.StringIO): + def write(self, s): + q.put(s) + return len(s) + def flush(self): + pass + + buf = QueueIO() old_console = printer._get_console() old_err_console = printer._get_err_console() - printer.set_thread_console(Console(file=buf, theme=printer.connpy_theme, force_terminal=True)) - printer.set_thread_err_console(Console(file=buf, theme=printer.connpy_theme, force_terminal=True)) - printer.set_thread_stream(buf) + def run_plugin(): + printer.set_thread_console(Console(file=buf, theme=printer.connpy_theme, force_terminal=True)) + printer.set_thread_err_console(Console(file=buf, theme=printer.connpy_theme, force_terminal=True)) + printer.set_thread_stream(buf) + try: + if hasattr(module, "Entrypoint"): + module.Entrypoint(args, parser, app) + except BaseException as e: + if not isinstance(e, SystemExit): + import traceback + printer.err_console.print(traceback.format_exc()) + finally: + printer.set_thread_console(old_console) + printer.set_thread_err_console(old_err_console) + printer.set_thread_stream(None) + q.put(None) + + t = threading.Thread(target=run_plugin, daemon=True) + t.start() - try: - if hasattr(module, "Entrypoint"): - module.Entrypoint(args, parser, app) - except BaseException as e: - if not isinstance(e, SystemExit): - import traceback - printer.err_console.print(traceback.format_exc()) - finally: - printer.set_thread_console(old_console) - printer.set_thread_err_console(old_err_console) - printer.set_thread_stream(None) - - for line in buf.getvalue().splitlines(keepends=True): - yield line + while True: + item = q.get() + if item is None: + break + yield item
        @@ -671,7 +709,7 @@ el.replaceWith(d); diff --git a/docs/connpy/services/profile_service.html b/docs/connpy/services/profile_service.html index 568aec0..e3f746c 100644 --- a/docs/connpy/services/profile_service.html +++ b/docs/connpy/services/profile_service.html @@ -3,7 +3,7 @@ - + connpy.services.profile_service API documentation @@ -429,7 +429,7 @@ el.replaceWith(d); diff --git a/docs/connpy/services/provider.html b/docs/connpy/services/provider.html index fa72924..aac535c 100644 --- a/docs/connpy/services/provider.html +++ b/docs/connpy/services/provider.html @@ -3,7 +3,7 @@ - + connpy.services.provider API documentation @@ -164,7 +164,7 @@ el.replaceWith(d); diff --git a/docs/connpy/services/sync_service.html b/docs/connpy/services/sync_service.html index 602c65a..aac676f 100644 --- a/docs/connpy/services/sync_service.html +++ b/docs/connpy/services/sync_service.html @@ -3,7 +3,7 @@ - + connpy.services.sync_service API documentation @@ -964,7 +964,7 @@ el.replaceWith(d); diff --git a/docs/connpy/services/system_service.html b/docs/connpy/services/system_service.html index ded62e1..95f059e 100644 --- a/docs/connpy/services/system_service.html +++ b/docs/connpy/services/system_service.html @@ -3,7 +3,7 @@ - + connpy.services.system_service API documentation @@ -325,7 +325,7 @@ el.replaceWith(d); diff --git a/docs/connpy/tests/conftest.html b/docs/connpy/tests/conftest.html index 560bf76..bc7eb23 100644 --- a/docs/connpy/tests/conftest.html +++ b/docs/connpy/tests/conftest.html @@ -3,7 +3,7 @@ - + connpy.tests.conftest API documentation @@ -258,7 +258,7 @@ def tmp_config_dir(tmp_path): diff --git a/docs/connpy/tests/index.html b/docs/connpy/tests/index.html index f60c26a..bd522fc 100644 --- a/docs/connpy/tests/index.html +++ b/docs/connpy/tests/index.html @@ -3,7 +3,7 @@ - + connpy.tests API documentation @@ -48,6 +48,10 @@ el.replaceWith(d);

        Tests for connpy.ai module.

        +
        connpy.tests.test_ai_copilot
        +
        +
        +
        connpy.tests.test_capture

        Tests for connpy.core_plugins.capture

        @@ -131,6 +135,7 @@ el.replaceWith(d);
        • connpy.tests.conftest
        • connpy.tests.test_ai
        • +
        • connpy.tests.test_ai_copilot
        • connpy.tests.test_capture
        • connpy.tests.test_completion
        • connpy.tests.test_configfile
        • @@ -152,7 +157,7 @@ el.replaceWith(d); diff --git a/docs/connpy/tests/test_ai.html b/docs/connpy/tests/test_ai.html index 25a9960..5f7835c 100644 --- a/docs/connpy/tests/test_ai.html +++ b/docs/connpy/tests/test_ai.html @@ -3,7 +3,7 @@ - + connpy.tests.test_ai API documentation @@ -1731,7 +1731,7 @@ def myai(self, ai_config, mock_litellm): diff --git a/docs/connpy/tests/test_ai_copilot.html b/docs/connpy/tests/test_ai_copilot.html new file mode 100644 index 0000000..341799c --- /dev/null +++ b/docs/connpy/tests/test_ai_copilot.html @@ -0,0 +1,315 @@ + + + + + + +connpy.tests.test_ai_copilot API documentation + + + + + + + + + + + +
          +
          +
          +

          Module connpy.tests.test_ai_copilot

          +
          +
          +
          +
          +
          +
          +
          +
          +

          Functions

          +
          +
          +def mock_acompletion() +
          +
          +
          + +Expand source code + +
          @pytest.fixture
          +def mock_acompletion():
          +    # Patch acompletion inside connpy.ai.aask_copilot
          +    with patch('litellm.acompletion') as mock:
          +        yield mock
          +
          +
          +
          +
          +def test_aask_copilot_fallback(mock_acompletion) +
          +
          +
          + +Expand source code + +
          def test_aask_copilot_fallback(mock_acompletion):
          +    agent = ai(DummyConfig())
          +    
          +    # Setup mock response for streaming
          +    class MockDelta:
          +        def __init__(self, content):
          +            self.content = content
          +            
          +    class MockChoice:
          +        def __init__(self, content):
          +            self.delta = MockDelta(content)
          +            
          +    class MockChunk:
          +        def __init__(self, content):
          +            self.choices = [MockChoice(content)]
          +            
          +    async def mock_ac(*args, **kwargs):
          +        return MockAsyncIterator([
          +            MockChunk("Here is some text response instead of tool call.")
          +        ])
          +    
          +    mock_acompletion.side_effect = mock_ac
          +    
          +    async def run_test():
          +        return await agent.aask_copilot("Router#", "What do I do?")
          +    
          +    result = asyncio.run(run_test())
          +    
          +    if result["error"]:
          +        print(f"ERROR OCCURRED: {result['error']}")
          +    
          +    assert result["error"] is None
          +    assert result["guide"] == "Here is some text response instead of tool call."
          +    assert result["risk_level"] == "low"
          +
          +
          +
          +
          +def test_aask_copilot_tool_call(mock_acompletion) +
          +
          +
          + +Expand source code + +
          def test_aask_copilot_tool_call(mock_acompletion):
          +    agent = ai(DummyConfig())
          +    
          +    # Setup mock response for streaming
          +    class MockDelta:
          +        def __init__(self, content):
          +            self.content = content
          +            
          +    class MockChoice:
          +        def __init__(self, content):
          +            self.delta = MockDelta(content)
          +            
          +    class MockChunk:
          +        def __init__(self, content):
          +            self.choices = [MockChoice(content)]
          +            
          +    # acompletion is awaited and returns an async iterator
          +    async def mock_ac(*args, **kwargs):
          +        return MockAsyncIterator([
          +            MockChunk("<guide>Check the interfaces and running config.</guide>"),
          +            MockChunk("<commands>\nshow ip int br\nshow run\n</commands>"),
          +            MockChunk("<risk>low</risk>")
          +        ])
          +    
          +    mock_acompletion.side_effect = mock_ac
          +    
          +    async def run_test():
          +        return await agent.aask_copilot("Router#", "What do I do?")
          +    
          +    result = asyncio.run(run_test())
          +    
          +    if result["error"]:
          +        print(f"ERROR OCCURRED: {result['error']}")
          +    
          +    assert result["error"] is None
          +    assert result["guide"] == "Check the interfaces and running config."
          +    assert result["risk_level"] == "low"
          +    assert result["commands"] == ["show ip int br", "show run"]
          +
          +
          +
          +
          +def test_ingress_task_interception() +
          +
          +
          + +Expand source code + +
          def test_ingress_task_interception():
          +    async def run_test():
          +        c = node("test_node", "1.2.3.4")
          +        c.mylog = MagicMock()
          +        c.mylog.getvalue.return_value = b"Some session log"
          +        c.unique = "test_node"
          +        c.host = "1.2.3.4"
          +        c.tags = {"os": "cisco_ios"}
          +        
          +        class MockStream:
          +            def __init__(self):
          +                self.data = [b"a", b"b", b"\x00", b"c", b""]
          +            async def read(self):
          +                if self.data:
          +                    return self.data.pop(0)
          +                return b""
          +            def setup(self, resize_callback):
          +                pass
          +
          +        stream = MockStream()
          +        
          +        called_copilot = False
          +        async def mock_handler(buffer, node_info, s, child_fd):
          +            nonlocal called_copilot
          +            called_copilot = True
          +            assert buffer == "Some session log"
          +            assert node_info["os"] == "cisco_ios"
          +            
          +        c.child = MagicMock()
          +        c.child.child_fd = 123
          +        c.child.after = b""
          +        c.child.buffer = b""
          +        
          +        async def mock_ingress():
          +            while True:
          +                data = await stream.read()
          +                if not data:
          +                    break
          +                
          +                if mock_handler and b'\x00' in data:
          +                    buffer = c.mylog.getvalue().decode()
          +                    node_info = {"name": getattr(c, 'unique', 'unknown'), "host": getattr(c, 'host', 'unknown')}
          +                    if isinstance(getattr(c, 'tags', None), dict):
          +                        node_info["os"] = c.tags.get("os", "unknown")
          +                    await mock_handler(buffer, node_info, stream, c.child.child_fd)
          +                    continue
          +                    
          +        await mock_ingress()
          +        assert called_copilot
          +        
          +    asyncio.run(run_test())
          +
          +
          +
          +
          +def test_logclean_ansi() +
          +
          +
          + +Expand source code + +
          def test_logclean_ansi():
          +    c = node("test_node", "1.2.3.4")
          +    raw = "Router#\x1b[K\x1b[m show ip"
          +    clean = c._logclean(raw, var=True)
          +    assert "\x1b" not in clean
          +
          +
          +
          +
          +
          +
          +

          Classes

          +
          +
          +class DummyConfig +
          +
          +
          + +Expand source code + +
          class DummyConfig:
          +    def __init__(self):
          +        self.config = {"ai": {"engineer_api_key": "test_key", "engineer_model": "test_model"}}
          +        self.defaultdir = "/tmp"
          +
          +
          +
          +
          +class MockAsyncIterator +(items) +
          +
          +
          + +Expand source code + +
          class MockAsyncIterator:
          +    def __init__(self, items):
          +        self.items = items
          +    def __aiter__(self):
          +        return self
          +    async def __anext__(self):
          +        if not self.items:
          +            raise StopAsyncIteration
          +        return self.items.pop(0)
          +
          +
          +
          +
          +
          +
          + +
          + + + diff --git a/docs/connpy/tests/test_capture.html b/docs/connpy/tests/test_capture.html index 74f5599..02093c4 100644 --- a/docs/connpy/tests/test_capture.html +++ b/docs/connpy/tests/test_capture.html @@ -3,7 +3,7 @@ - + connpy.tests.test_capture API documentation @@ -245,7 +245,7 @@ def mock_connapp(): diff --git a/docs/connpy/tests/test_completion.html b/docs/connpy/tests/test_completion.html index df93b0b..7b647dc 100644 --- a/docs/connpy/tests/test_completion.html +++ b/docs/connpy/tests/test_completion.html @@ -3,7 +3,7 @@ - + connpy.tests.test_completion API documentation @@ -257,7 +257,7 @@ el.replaceWith(d); diff --git a/docs/connpy/tests/test_configfile.html b/docs/connpy/tests/test_configfile.html index 8fdf443..e5f058b 100644 --- a/docs/connpy/tests/test_configfile.html +++ b/docs/connpy/tests/test_configfile.html @@ -3,7 +3,7 @@ - + connpy.tests.test_configfile API documentation @@ -2005,7 +2005,7 @@ el.replaceWith(d); diff --git a/docs/connpy/tests/test_connapp.html b/docs/connpy/tests/test_connapp.html index 6b36833..283c4f1 100644 --- a/docs/connpy/tests/test_connapp.html +++ b/docs/connpy/tests/test_connapp.html @@ -3,7 +3,7 @@ - + connpy.tests.test_connapp API documentation @@ -699,7 +699,7 @@ def test_run(mock_run_commands, app): diff --git a/docs/connpy/tests/test_core.html b/docs/connpy/tests/test_core.html index f988fd0..de71a53 100644 --- a/docs/connpy/tests/test_core.html +++ b/docs/connpy/tests/test_core.html @@ -3,7 +3,7 @@ - + connpy.tests.test_core API documentation @@ -1369,7 +1369,7 @@ el.replaceWith(d); diff --git a/docs/connpy/tests/test_execution_service.html b/docs/connpy/tests/test_execution_service.html index fae19f0..9aa4e6a 100644 --- a/docs/connpy/tests/test_execution_service.html +++ b/docs/connpy/tests/test_execution_service.html @@ -3,7 +3,7 @@ - + connpy.tests.test_execution_service API documentation @@ -142,7 +142,7 @@ Regression: ExecutionService.test_commands currently ignores on_node_complete. 
diff --git a/docs/connpy/tests/test_grpc_layer.html b/docs/connpy/tests/test_grpc_layer.html index cc00645..bc00f02 100644 --- a/docs/connpy/tests/test_grpc_layer.html +++ b/docs/connpy/tests/test_grpc_layer.html @@ -3,7 +3,7 @@ - + connpy.tests.test_grpc_layer API documentation @@ -574,15 +574,15 @@ def test_interact_node_uses_passed_name(self, mock_node, servicer): @patch("select.select") def test_connect_dynamic_msg_formatting_ssm(self, mock_select, mock_read, mock_setraw, mock_getattr, mock_setattr): from connpy.grpc_layer.stubs import NodeStub - + mock_getattr.return_value = [0, 0, 0, 0, 0, 0, [0] * 32] mock_channel = MagicMock() stub = NodeStub(mock_channel, "localhost:8048") - + mock_resp = MagicMock() mock_resp.success = True - stub.stub.interact_node.return_value = iter([mock_resp]) - + mock_resp.stdout_data = b'' + stub.stub.interact_node.return_value = iter([mock_resp]) with patch("connpy.printer.success") as mock_success: with patch("sys.stdin.fileno", return_value=0): mock_select.return_value = ([], [], []) @@ -619,15 +619,15 @@ def test_interact_node_uses_passed_name(self, mock_node, servicer): @patch("select.select") def test_connect_dynamic_msg_formatting_ssm(self, mock_select, mock_read, mock_setraw, mock_getattr, mock_setattr): from connpy.grpc_layer.stubs import NodeStub - + mock_getattr.return_value = [0, 0, 0, 0, 0, 0, [0] * 32] mock_channel = MagicMock() stub = NodeStub(mock_channel, "localhost:8048") - + mock_resp = MagicMock() mock_resp.success = True - stub.stub.interact_node.return_value = iter([mock_resp]) - + mock_resp.stdout_data = b'' + stub.stub.interact_node.return_value = iter([mock_resp]) with patch("connpy.printer.success") as mock_success: with patch("sys.stdin.fileno", return_value=0): mock_select.return_value = ([], [], []) @@ -709,7 +709,7 @@ def test_connect_dynamic_msg_formatting_ssm(self, mock_select, mock_read, mock_s diff --git a/docs/connpy/tests/test_hooks.html b/docs/connpy/tests/test_hooks.html index 32ce5b0..d953297 
100644 --- a/docs/connpy/tests/test_hooks.html +++ b/docs/connpy/tests/test_hooks.html @@ -3,7 +3,7 @@ - + connpy.tests.test_hooks API documentation @@ -673,7 +673,7 @@ el.replaceWith(d); diff --git a/docs/connpy/tests/test_node_service.html b/docs/connpy/tests/test_node_service.html index dfee076..2ae41f3 100644 --- a/docs/connpy/tests/test_node_service.html +++ b/docs/connpy/tests/test_node_service.html @@ -3,7 +3,7 @@ - + connpy.tests.test_node_service API documentation @@ -178,7 +178,7 @@ Regression: connapp._mod calls add_node instead of update_node.

        diff --git a/docs/connpy/tests/test_plugins.html b/docs/connpy/tests/test_plugins.html index 9417e56..50e72d6 100644 --- a/docs/connpy/tests/test_plugins.html +++ b/docs/connpy/tests/test_plugins.html @@ -3,7 +3,7 @@ - + connpy.tests.test_plugins API documentation @@ -917,7 +917,7 @@ el.replaceWith(d); diff --git a/docs/connpy/tests/test_printer.html b/docs/connpy/tests/test_printer.html index fd232c8..8d216ba 100644 --- a/docs/connpy/tests/test_printer.html +++ b/docs/connpy/tests/test_printer.html @@ -3,7 +3,7 @@ - + connpy.tests.test_printer API documentation @@ -459,7 +459,7 @@ el.replaceWith(d); diff --git a/docs/connpy/tests/test_printer_concurrency.html b/docs/connpy/tests/test_printer_concurrency.html index 9d5a1a2..2624923 100644 --- a/docs/connpy/tests/test_printer_concurrency.html +++ b/docs/connpy/tests/test_printer_concurrency.html @@ -3,7 +3,7 @@ - + connpy.tests.test_printer_concurrency API documentation @@ -148,7 +148,7 @@ el.replaceWith(d); diff --git a/docs/connpy/tests/test_profile_service.html b/docs/connpy/tests/test_profile_service.html index 83124f1..bb9a8fe 100644 --- a/docs/connpy/tests/test_profile_service.html +++ b/docs/connpy/tests/test_profile_service.html @@ -3,7 +3,7 @@ - + connpy.tests.test_profile_service API documentation @@ -192,7 +192,7 @@ Regression: ProfileService currently doesn't resolve inheritance within profiles diff --git a/docs/connpy/tests/test_provider.html b/docs/connpy/tests/test_provider.html index 3bac6b9..7d1d16d 100644 --- a/docs/connpy/tests/test_provider.html +++ b/docs/connpy/tests/test_provider.html @@ -3,7 +3,7 @@ - + connpy.tests.test_provider API documentation @@ -139,7 +139,7 @@ el.replaceWith(d); diff --git a/docs/connpy/tests/test_sync.html b/docs/connpy/tests/test_sync.html index c9a37a6..ffab87a 100644 --- a/docs/connpy/tests/test_sync.html +++ b/docs/connpy/tests/test_sync.html @@ -3,7 +3,7 @@ - + connpy.tests.test_sync API documentation @@ -354,7 +354,7 @@ def test_perform_restore(self, 
mock_remove, mock_dirname, mock_exists, MockZipFi diff --git a/docs/connpy/tunnels.html b/docs/connpy/tunnels.html index 383cbe7..8c1df81 100644 --- a/docs/connpy/tunnels.html +++ b/docs/connpy/tunnels.html @@ -3,7 +3,7 @@ - + connpy.tunnels API documentation @@ -94,6 +94,24 @@ el.replaceWith(d); # signal handling not supported on some loops (e.g., Windows Proactor) pass + def stop_reading(self): + """Temporarily stop reading from stdin.""" + if self._loop and self.stdin_fd is not None: + try: + self._loop.remove_reader(self.stdin_fd) + except Exception: + pass + + def start_reading(self): + """Resume reading from stdin.""" + if self._loop and self.stdin_fd is not None: + try: + # Ensure we don't add it twice + self._loop.remove_reader(self.stdin_fd) + except Exception: + pass + self._loop.add_reader(self.stdin_fd, self._read_ready) + def teardown(self): if self._loop: try: @@ -216,6 +234,44 @@ Handles terminal raw mode, async I/O, and SIGWINCH signals.

    +
    +def start_reading(self) +
    +
    +
    + +Expand source code + +
    def start_reading(self):
    +    """Resume reading from stdin."""
    +    if self._loop and self.stdin_fd is not None:
    +        try:
    +            # Ensure we don't add it twice
    +            self._loop.remove_reader(self.stdin_fd)
    +        except Exception:
    +            pass
    +        self._loop.add_reader(self.stdin_fd, self._read_ready)
    +
    +

    Resume reading from stdin.

    +
    +
    +def stop_reading(self) +
    +
    +
    + +Expand source code + +
    def stop_reading(self):
    +    """Temporarily stop reading from stdin."""
    +    if self._loop and self.stdin_fd is not None:
    +        try:
    +            self._loop.remove_reader(self.stdin_fd)
    +        except Exception:
    +            pass
    +
    +

    Temporarily stop reading from stdin.

    +
    def teardown(self)
    @@ -293,6 +349,7 @@ Handles terminal raw mode, async I/O, and SIGWINCH signals.

    self.response_queue = response_queue self.running = True self._reader_queue = asyncio.Queue() + self.copilot_queue = asyncio.Queue() self.resize_callback = None self._loop = None self.t = None @@ -309,6 +366,19 @@ Handles terminal raw mode, async I/O, and SIGWINCH signals.

    if req.cols > 0 and req.rows > 0: if self.resize_callback: self._loop.call_soon_threadsafe(self.resize_callback, req.rows, req.cols) + # Copilot dispatching + copilot_msg = {} + if getattr(req, "copilot_question", ""): + copilot_msg.update({ + "question": req.copilot_question, + "context_buffer": getattr(req, "copilot_context_buffer", ""), + "node_info_json": getattr(req, "copilot_node_info_json", "") + }) + if getattr(req, "copilot_action", ""): + copilot_msg["action"] = req.copilot_action + + if copilot_msg: + self._loop.call_soon_threadsafe(self.copilot_queue.put_nowait, copilot_msg) if req.stdin_data: self._loop.call_soon_threadsafe(self._reader_queue.put_nowait, req.stdin_data) except Exception: @@ -374,6 +444,19 @@ Bridges the blocking gRPC iterators with the async _async_interact_loop.

  • LocalStream

    -
      + @@ -460,7 +545,7 @@ Bridges the blocking gRPC iterators with the async _async_interact_loop.

      diff --git a/docs/connpy/utils.html b/docs/connpy/utils.html new file mode 100644 index 0000000..a75f527 --- /dev/null +++ b/docs/connpy/utils.html @@ -0,0 +1,130 @@ + + + + + + +connpy.utils API documentation + + + + + + + + + + + +
      +
      +
      +

      Module connpy.utils

      +
      +
      +
      +
      +
      +
      +
      +
      +

      Functions

      +
      +
      +def log_cleaner(data:Β str) ‑>Β str +
      +
      +
      + +Expand source code + +
      def log_cleaner(data: str) -> str:
      +    """
      +    Stateless utility to remove ANSI sequences and process cursor movements.
      +    """
      +    if not data:
      +        return ""
      +            
      +    lines = data.split('\n')
      +    cleaned_lines = []
      +    
      +    # Regex to capture: ANSI sequences, control characters (\r, \b, etc), and plain text chunks
      +    token_re = re.compile(r'(\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/ ]*[@-~])|\r|\b|\x7f|[\x00-\x1F]|[^\x1B\r\b\x7f\x00-\x1F]+)')
      +    
      +    for line in lines:
      +        buffer = []
      +        cursor = 0
      +        
      +        for token in token_re.findall(line):
      +            if token == '\r':
      +                cursor = 0
      +            elif token in ('\b', '\x7f'):
      +                if cursor > 0:
      +                    cursor -= 1
      +            elif token == '\x1B[D': # Left Arrow
      +                if cursor > 0:
      +                    cursor -= 1
      +            elif token == '\x1B[C': # Right Arrow
      +                if cursor < len(buffer):
      +                    cursor += 1
      +            elif token == '\x1B[K': # Clear to end of line
      +                buffer = buffer[:cursor]
      +            elif token.startswith('\x1B'):
      +                continue
      +            elif len(token) == 1 and ord(token) < 32:
      +                continue
      +            else:
      +                for char in token:
      +                    if cursor == len(buffer):
      +                        buffer.append(char)
      +                    else:
      +                        buffer[cursor] = char
      +                    cursor += 1
      +        cleaned_lines.append("".join(buffer))
      +        
      +    return "\n".join(cleaned_lines).replace('\n\n', '\n').strip()
      +
      +

      Stateless utility to remove ANSI sequences and process cursor movements.

      +
      +
      +
      +
      +
      +
      + +
      + + + diff --git a/requirements.txt b/requirements.txt index cbdd28e..e130545 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,5 +13,10 @@ protobuf>=6.31.1,<7.0.0 google-api-python-client>=2.125.0 google-auth-oauthlib>=1.2.0 google-auth-httplib2>=0.2.0 +prompt-toolkit>=3.0.0 +mcp>=1.2.0 +aiohttp>=3.9.0 +httpx>=0.27.0 +requests>=2.31.0 pytest>=8.0.0 pytest-mock>=3.12.0 diff --git a/setup.cfg b/setup.cfg index 0e85a14..9de1251 100644 --- a/setup.cfg +++ b/setup.cfg @@ -18,12 +18,16 @@ classifiers = Topic :: System :: Networking Intended Audience :: Telecommunications Industry Programming Language :: Python :: 3 + Programming Language :: Python :: 3.10 + Programming Language :: Python :: 3.11 + Programming Language :: Python :: 3.12 Natural Language :: English Operating System :: MacOS Operating System :: Unix [options] packages = find: +python_requires = >=3.10 install_requires = rich>=13.7.1 rich-argparse>=1.4.0 @@ -40,6 +44,11 @@ install_requires = google-api-python-client>=2.125.0 google-auth-oauthlib>=1.2.0 google-auth-httplib2>=0.2.0 + prompt-toolkit>=3.0.0 + mcp>=1.2.0 + aiohttp>=3.9.0 + httpx>=0.27.0 + requests>=2.31.0 [options.entry_points] console_scripts =