commit 5a1dbc04e1
parent 0e34ea79c6

    add confirmation and fixes
@@ -246,6 +246,8 @@ With the Connpy API you can run commands on devices using http requests
 
 - A JSON object with the results of the executed commands on the nodes.
 
+---
+
 ### 4. Ask AI
 
 **Endpoint**: `/ask_ai`
@@ -152,6 +152,8 @@ With the Connpy API you can run commands on devices using http requests
 
 - A JSON object with the results of the executed commands on the nodes.
 
+---
+
 ### 4. Ask AI
 
 **Endpoint**: `/ask_ai`
@@ -1,2 +1,2 @@
-__version__ = "3.2.2"
+__version__ = "3.2.3"
 
connpy/ai.py (115 changed lines)
@@ -1,9 +1,11 @@
 import openai
+import time
 import json
 import re
 import ast
 from textwrap import dedent
 from .core import nodes
+from copy import deepcopy
 
 class ai:
     ''' This class generates an ai object. Contains all the information and methods to make requests to openAI chatGPT to run actions on the application.
@@ -116,9 +118,34 @@ class ai:
         self.__prompt["command_assistant"]= """
         router1: ['show running-config']
         """
+        self.__prompt["confirmation_system"] = """
+        Please analyze the user's input and categorize it as either an affirmation or negation. Based on this analysis, respond with:
+
+        'True' if the input is an affirmation like 'do it', 'go ahead', 'sure', etc.
+        'False' if the input is a negation.
+        If the input does not fit into either of these categories, kindly express that you didn't understand and request the user to rephrase their response.
+        """
+        self.__prompt["confirmation_user"] = "Yes go ahead!"
+        self.__prompt["confirmation_assistant"] = "True"
         self.model = model
         self.temp = temp
 
+    def _retry_function(self, function, max_retries, backoff_num, *args):
+        #Retry openai requests
+        retries = 0
+        while retries < max_retries:
+            try:
+                myfunction = function(*args)
+                break
+            except (openai.error.APIConnectionError, openai.error.RateLimitError):
+                wait_time = backoff_num * (2 ** retries)
+                time.sleep(wait_time)
+                retries += 1
+                continue
+        if retries == max_retries:
+            myfunction = False
+        return myfunction
+
     def _clean_original_response(self, raw_response):
         #Parse response for first request to openAI GPT.
         info_dict = {}
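The new `_retry_function` wraps every OpenAI call with exponential backoff: with the defaults `max_retries=3` and `backoff_num=1`, a failing call is retried after sleeping 1 s, 2 s, then 4 s, and `False` is returned once the retries are exhausted. A minimal standalone sketch of that schedule (illustration only, not Connpy code):

```python
# Standalone illustration of the backoff schedule used by _retry_function.
backoff_num = 1
max_retries = 3
waits = [backoff_num * (2 ** retry) for retry in range(max_retries)]
print(waits)  # [1, 2, 4] -> seconds slept after each failed attempt
```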
@@ -178,6 +205,14 @@ class ai:
             pass
         return info_dict
 
+    def _clean_confirmation_response(self, raw_response):
+        #Parse response for confirmation request to openAI GPT.
+        value = raw_response.strip()
+        if value.strip(".").lower() == "true":
+            value = True
+        elif value.strip(".").lower() == "false":
+            value = False
+        return value
+
     def _get_commands(self, user_input, nodes):
         #Send the request for commands for each device to openAI GPT.
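Note that `_clean_confirmation_response` only special-cases the literals `True`/`False` (case-insensitive, with an optional trailing period); any other reply passes through as a string, which is how `confirm()` surfaces the model's request to rephrase. A small standalone check of those rules:

```python
# Standalone mirror of _clean_confirmation_response's normalization rules.
def clean(raw_response):
    value = raw_response.strip()
    if value.strip(".").lower() == "true":
        return True
    elif value.strip(".").lower() == "false":
        return False
    return value

assert clean(" True. ") is True
assert clean("false") is False
assert clean("Sorry, I didn't understand.") == "Sorry, I didn't understand."
```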
@@ -233,7 +268,51 @@ class ai:
         output["response"] = self._clean_original_response(output["raw_response"])
         return output
 
-    def ask(self, user_input, dryrun = False, chat_history = None):
+    def _get_confirmation(self, user_input):
+        #Send the request to identify if user is confirming or denying the task
+        message = []
+        message.append({"role": "system", "content": dedent(self.__prompt["confirmation_system"])})
+        message.append({"role": "user", "content": dedent(self.__prompt["confirmation_user"])})
+        message.append({"role": "assistant", "content": dedent(self.__prompt["confirmation_assistant"])})
+        message.append({"role": "user", "content": user_input})
+        response = openai.ChatCompletion.create(
+            model=self.model,
+            messages=message,
+            temperature=self.temp,
+            top_p=1
+            )
+        output = {}
+        output["dict_response"] = response
+        output["raw_response"] = response["choices"][0]["message"]["content"]
+        output["response"] = self._clean_confirmation_response(output["raw_response"])
+        return output
+
+    def confirm(self, user_input, max_retries=3, backoff_num=1):
+        '''
+        Send the user input to openAI GPT and verify if response is affirmative or negative.
+
+        ### Parameters:
+
+        - user_input (str): User response confirming or denying.
+
+        ### Optional Parameters:
+
+        - max_retries (int): Maximum number of retries for gpt api.
+        - backoff_num (int): Backoff factor for exponential wait time
+                             between retries.
+
+        ### Returns:
+
+        bool or str: True, False or str if AI couldn't understand the response
+        '''
+        result = self._retry_function(self._get_confirmation, max_retries, backoff_num, user_input)
+        if result:
+            output = result["response"]
+        else:
+            output = f"{self.model} api is not responding right now, please try again later."
+        return output
+
+    def ask(self, user_input, dryrun = False, chat_history = None, max_retries=3, backoff_num=1):
         '''
         Send the user input to openAI GPT and parse the response to run an action in the application.
 
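Together, `_get_confirmation` and `confirm()` let the application gate a pending action on a free-form user reply. A hedged usage sketch — the `ai(config)` constructor call follows the `myai(conf)` pattern used in the API layer, but the exact arguments are an assumption:

```python
# Hypothetical usage sketch; the ai(conf) constructor call is an assumption.
from connpy import configfile, ai

conf = configfile()
myai = ai(conf)

answer = myai.confirm("sure, go ahead")
if answer is True:
    print("confirmed, run the pending task")
elif answer is False:
    print("declined, drop the task")
else:
    print(answer)  # str: rephrase request or API error message
```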
@@ -250,9 +329,13 @@ class ai:
 
         ### Optional Parameters:
 
-        - dryrun (bool): Set to true to get the arguments to use to run
-                         in the app. Default is false and it will run
-                         the actions directly.
+        - dryrun (bool): Set to true to get the arguments to use to
+                         run in the app. Default is false and it
+                         will run the actions directly.
+        - chat_history (list): List in gpt api format for the chat history.
+        - max_retries (int): Maximum number of retries for gpt api.
+        - backoff_num (int): Backoff factor for exponential wait time
+                             between retries.
 
         ### Returns:
 
@@ -279,7 +362,11 @@ class ai:
 
         '''
         output = {}
-        original = self._get_filter(user_input, chat_history)
+        original = self._retry_function(self._get_filter, max_retries, backoff_num, user_input, chat_history)
+        if not original:
+            output["app_related"] = False
+            output["response"] = f"{self.model} api is not responding right now, please try again later."
+            return output
         output["input"] = user_input
         output["app_related"] = original["response"]["app_related"]
         output["dryrun"] = dryrun
@@ -301,7 +388,12 @@ class ai:
             if not type == "command":
                 output["action"] = "list_nodes"
             else:
-                commands = self._get_commands(user_input, thisnodes)
+                commands = self._retry_function(self._get_commands, max_retries, backoff_num, user_input, thisnodes)
+                if not commands:
+                    output = {}
+                    output["app_related"] = False
+                    output["response"] = f"{self.model} api is not responding right now, please try again later."
+                    return output
                 output["args"] = {}
                 output["args"]["commands"] = commands["response"]["commands"]
                 output["args"]["vars"] = commands["response"]["variables"]
@@ -310,10 +402,21 @@ class ai:
                 output["action"] = "test"
             else:
                 output["action"] = "run"
+        if dryrun:
+            output["task"] = []
+            if output["action"] == "test":
+                output["task"].append({"Task": "Verify if expected value is in command(s) output"})
+                output["task"].append({"Expected value to verify": output["args"]["expected"]})
+            elif output["action"] == "run":
+                output["task"].append({"Task": "Run command(s) on devices and return output"})
+            varstocommands = deepcopy(output["args"]["vars"])
+            del varstocommands["__global__"]
+            output["task"].append({"Devices": varstocommands})
         if not dryrun:
             mynodes = nodes(self.config.getitems(output["nodes"]),config=self.config)
             if output["action"] == "test":
                 output["result"] = mynodes.test(**output["args"])
+                output["logs"] = mynodes.output
             elif output["action"] == "run":
                 output["result"] = mynodes.run(**output["args"])
         return output
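With `dryrun=True`, `ask()` now returns a human-readable preview under `output["task"]` instead of executing anything, after stripping the `__global__` key from the per-device variables. A hypothetical preview for a "run" action (device names, commands, and the vars layout are invented for illustration):

```python
# Hypothetical dryrun preview from ask(..., dryrun=True) for a "run" action;
# device names and commands are invented for illustration.
preview_task = [
    {"Task": "Run command(s) on devices and return output"},
    {"Devices": {"router1": ["show running-config"]}},  # vars minus "__global__"
]
```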
@@ -70,6 +70,13 @@ def ask_ai():
     ai = myai(conf)
     return ai.ask(input, dryrun, chat_history)
 
+@app.route("/confirm", methods=["POST"])
+def confirm():
+    conf = app.custom_config
+    data = request.get_json()
+    input = data["input"]
+    ai = myai(conf)
+    return str(ai.confirm(input))
 
 @app.route("/run_commands", methods=["POST"])
 def run_commands():
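A hedged client-side example for the new `/confirm` route; since the handler wraps `ai.confirm()` in `str()`, the body comes back as plain text (base URL and port are assumptions):

```python
# Hypothetical client call; base URL and port are assumptions.
import requests

resp = requests.post(
    "http://localhost:8048/confirm",
    json={"input": "yes, go ahead"},
)
print(resp.text)  # "True", "False", or the model's request to rephrase
```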
@@ -99,7 +106,7 @@ def run_commands():
 
     mynodes = nodes(mynodes, config=conf)
     try:
-        args["vars"] = data["variables"]
+        args["vars"] = data["vars"]
     except:
         pass
     try:
@@ -111,7 +118,9 @@ def run_commands():
     if action == "run":
         output = mynodes.run(**args)
     elif action == "test":
-        output = mynodes.test(**args)
+        output = {}
+        output["result"] = mynodes.test(**args)
+        output["output"] = mynodes.output
     else:
         error = "Wrong action '{}'".format(action)
         return({"DataError": error})
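For `test` actions, `/run_commands` now returns both the boolean test result and the captured session output instead of the bare result. A hypothetical response shape (keys per the diff, values invented):

```python
# Hypothetical /run_commands response for action == "test"; values invented.
response = {
    "result": {"router1": True},
    "output": {"router1": "... captured command output ..."},
}
```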
@@ -147,6 +147,7 @@ options:
 <ul>
 <li>A JSON object with the results of the executed commands on the nodes.</li>
 </ul>
+<hr>
 <h3 id="4-ask-ai">4. Ask AI</h3>
 <p><strong>Endpoint</strong>: <code>/ask_ai</code></p>
 <p><strong>Method</strong>: <code>POST</code></p>
@@ -401,6 +402,8 @@ With the Connpy API you can run commands on devices using http requests
 
 - A JSON object with the results of the executed commands on the nodes.
 
+---
+
 ### 4. Ask AI
 
 **Endpoint**: `/ask_ai`
@@ -682,9 +685,34 @@ __pdoc__ = {
         self.__prompt["command_assistant"]= """
         router1: ['show running-config']
         """
+        self.__prompt["confirmation_system"] = """
+        Please analyze the user's input and categorize it as either an affirmation or negation. Based on this analysis, respond with:
+
+        'True' if the input is an affirmation like 'do it', 'go ahead', 'sure', etc.
+        'False' if the input is a negation.
+        If the input does not fit into either of these categories, kindly express that you didn't understand and request the user to rephrase their response.
+        """
+        self.__prompt["confirmation_user"] = "Yes go ahead!"
+        self.__prompt["confirmation_assistant"] = "True"
         self.model = model
         self.temp = temp
 
+    def _retry_function(self, function, max_retries, backoff_num, *args):
+        #Retry openai requests
+        retries = 0
+        while retries < max_retries:
+            try:
+                myfunction = function(*args)
+                break
+            except (openai.error.APIConnectionError, openai.error.RateLimitError):
+                wait_time = backoff_num * (2 ** retries)
+                time.sleep(wait_time)
+                retries += 1
+                continue
+        if retries == max_retries:
+            myfunction = False
+        return myfunction
+
     def _clean_original_response(self, raw_response):
         #Parse response for first request to openAI GPT.
         info_dict = {}
@@ -744,6 +772,14 @@ __pdoc__ = {
             pass
         return info_dict
 
+    def _clean_confirmation_response(self, raw_response):
+        #Parse response for confirmation request to openAI GPT.
+        value = raw_response.strip()
+        if value.strip(".").lower() == "true":
+            value = True
+        elif value.strip(".").lower() == "false":
+            value = False
+        return value
+
     def _get_commands(self, user_input, nodes):
         #Send the request for commands for each device to openAI GPT.
@@ -799,7 +835,51 @@ __pdoc__ = {
         output["response"] = self._clean_original_response(output["raw_response"])
         return output
 
-    def ask(self, user_input, dryrun = False, chat_history = None):
+    def _get_confirmation(self, user_input):
+        #Send the request to identify if user is confirming or denying the task
+        message = []
+        message.append({"role": "system", "content": dedent(self.__prompt["confirmation_system"])})
+        message.append({"role": "user", "content": dedent(self.__prompt["confirmation_user"])})
+        message.append({"role": "assistant", "content": dedent(self.__prompt["confirmation_assistant"])})
+        message.append({"role": "user", "content": user_input})
+        response = openai.ChatCompletion.create(
+            model=self.model,
+            messages=message,
+            temperature=self.temp,
+            top_p=1
+            )
+        output = {}
+        output["dict_response"] = response
+        output["raw_response"] = response["choices"][0]["message"]["content"]
+        output["response"] = self._clean_confirmation_response(output["raw_response"])
+        return output
+
+    def confirm(self, user_input, max_retries=3, backoff_num=1):
+        '''
+        Send the user input to openAI GPT and verify if response is affirmative or negative.
+
+        ### Parameters:
+
+        - user_input (str): User response confirming or denying.
+
+        ### Optional Parameters:
+
+        - max_retries (int): Maximum number of retries for gpt api.
+        - backoff_num (int): Backoff factor for exponential wait time
+                             between retries.
+
+        ### Returns:
+
+        bool or str: True, False or str if AI couldn't understand the response
+        '''
+        result = self._retry_function(self._get_confirmation, max_retries, backoff_num, user_input)
+        if result:
+            output = result["response"]
+        else:
+            output = f"{self.model} api is not responding right now, please try again later."
+        return output
+
+    def ask(self, user_input, dryrun = False, chat_history = None, max_retries=3, backoff_num=1):
         '''
         Send the user input to openAI GPT and parse the response to run an action in the application.
 
@@ -816,9 +896,13 @@ __pdoc__ = {
 
         ### Optional Parameters:
 
-        - dryrun (bool): Set to true to get the arguments to use to run
-                         in the app. Default is false and it will run
-                         the actions directly.
+        - dryrun (bool): Set to true to get the arguments to use to
+                         run in the app. Default is false and it
+                         will run the actions directly.
+        - chat_history (list): List in gpt api format for the chat history.
+        - max_retries (int): Maximum number of retries for gpt api.
+        - backoff_num (int): Backoff factor for exponential wait time
+                             between retries.
 
         ### Returns:
 
@@ -845,7 +929,11 @@ __pdoc__ = {
 
         '''
         output = {}
-        original = self._get_filter(user_input, chat_history)
+        original = self._retry_function(self._get_filter, max_retries, backoff_num, user_input, chat_history)
+        if not original:
+            output["app_related"] = False
+            output["response"] = f"{self.model} api is not responding right now, please try again later."
+            return output
         output["input"] = user_input
         output["app_related"] = original["response"]["app_related"]
         output["dryrun"] = dryrun
@@ -867,7 +955,12 @@ __pdoc__ = {
             if not type == "command":
                 output["action"] = "list_nodes"
             else:
-                commands = self._get_commands(user_input, thisnodes)
+                commands = self._retry_function(self._get_commands, max_retries, backoff_num, user_input, thisnodes)
+                if not commands:
+                    output = {}
+                    output["app_related"] = False
+                    output["response"] = f"{self.model} api is not responding right now, please try again later."
+                    return output
                 output["args"] = {}
                 output["args"]["commands"] = commands["response"]["commands"]
                 output["args"]["vars"] = commands["response"]["variables"]
@@ -876,10 +969,21 @@ __pdoc__ = {
                 output["action"] = "test"
             else:
                 output["action"] = "run"
+        if dryrun:
+            output["task"] = []
+            if output["action"] == "test":
+                output["task"].append({"Task": "Verify if expected value is in command(s) output"})
+                output["task"].append({"Expected value to verify": output["args"]["expected"]})
+            elif output["action"] == "run":
+                output["task"].append({"Task": "Run command(s) on devices and return output"})
+            varstocommands = deepcopy(output["args"]["vars"])
+            del varstocommands["__global__"]
+            output["task"].append({"Devices": varstocommands})
         if not dryrun:
             mynodes = nodes(self.config.getitems(output["nodes"]),config=self.config)
             if output["action"] == "test":
                 output["result"] = mynodes.test(**output["args"])
+                output["logs"] = mynodes.output
             elif output["action"] == "run":
                 output["result"] = mynodes.run(**output["args"])
         return output</code></pre>
@@ -887,7 +991,7 @@ __pdoc__ = {
 <h3>Methods</h3>
 <dl>
 <dt id="connpy.ai.ask"><code class="name flex">
-<span>def <span class="ident">ask</span></span>(<span>self, user_input, dryrun=False, chat_history=None)</span>
+<span>def <span class="ident">ask</span></span>(<span>self, user_input, dryrun=False, chat_history=None, max_retries=3, backoff_num=1)</span>
 </code></dt>
 <dd>
 <div class="desc"><p>Send the user input to openAI GPT and parse the response to run an action in the application.</p>
@@ -902,9 +1006,13 @@ __pdoc__ = {
 expected value.
 </code></pre>
 <h3 id="optional-parameters">Optional Parameters:</h3>
-<pre><code>- dryrun (bool): Set to true to get the arguments to use to run
-                 in the app. Default is false and it will run
-                 the actions directly.
+<pre><code>- dryrun (bool): Set to true to get the arguments to use to
+                 run in the app. Default is false and it
+                 will run the actions directly.
+- chat_history (list): List in gpt api format for the chat history.
+- max_retries (int): Maximum number of retries for gpt api.
+- backoff_num (int): Backoff factor for exponential wait time
+                     between retries.
 </code></pre>
 <h3 id="returns">Returns:</h3>
 <pre><code>dict: Dictionary formed with the following keys:
@@ -930,7 +1038,7 @@ __pdoc__ = {
 <summary>
 <span>Expand source code</span>
 </summary>
-<pre><code class="python">def ask(self, user_input, dryrun = False, chat_history = None):
+<pre><code class="python">def ask(self, user_input, dryrun = False, chat_history = None, max_retries=3, backoff_num=1):
 '''
 Send the user input to openAI GPT and parse the response to run an action in the application.
 
@@ -947,9 +1055,13 @@ __pdoc__ = {
 
     ### Optional Parameters:
 
-    - dryrun (bool): Set to true to get the arguments to use to run
-                     in the app. Default is false and it will run
-                     the actions directly.
+    - dryrun (bool): Set to true to get the arguments to use to
+                     run in the app. Default is false and it
+                     will run the actions directly.
+    - chat_history (list): List in gpt api format for the chat history.
+    - max_retries (int): Maximum number of retries for gpt api.
+    - backoff_num (int): Backoff factor for exponential wait time
+                         between retries.
 
     ### Returns:
 
@@ -976,7 +1088,11 @@ __pdoc__ = {
 
     '''
    output = {}
-    original = self._get_filter(user_input, chat_history)
+    original = self._retry_function(self._get_filter, max_retries, backoff_num, user_input, chat_history)
+    if not original:
+        output["app_related"] = False
+        output["response"] = f"{self.model} api is not responding right now, please try again later."
+        return output
     output["input"] = user_input
     output["app_related"] = original["response"]["app_related"]
     output["dryrun"] = dryrun
@@ -998,7 +1114,12 @@ __pdoc__ = {
         if not type == "command":
             output["action"] = "list_nodes"
         else:
-            commands = self._get_commands(user_input, thisnodes)
+            commands = self._retry_function(self._get_commands, max_retries, backoff_num, user_input, thisnodes)
+            if not commands:
+                output = {}
+                output["app_related"] = False
+                output["response"] = f"{self.model} api is not responding right now, please try again later."
+                return output
             output["args"] = {}
             output["args"]["commands"] = commands["response"]["commands"]
             output["args"]["vars"] = commands["response"]["variables"]
@@ -1007,15 +1128,72 @@ __pdoc__ = {
             output["action"] = "test"
         else:
             output["action"] = "run"
+    if dryrun:
+        output["task"] = []
+        if output["action"] == "test":
+            output["task"].append({"Task": "Verify if expected value is in command(s) output"})
+            output["task"].append({"Expected value to verify": output["args"]["expected"]})
+        elif output["action"] == "run":
+            output["task"].append({"Task": "Run command(s) on devices and return output"})
+        varstocommands = deepcopy(output["args"]["vars"])
+        del varstocommands["__global__"]
+        output["task"].append({"Devices": varstocommands})
     if not dryrun:
         mynodes = nodes(self.config.getitems(output["nodes"]),config=self.config)
         if output["action"] == "test":
             output["result"] = mynodes.test(**output["args"])
+            output["logs"] = mynodes.output
         elif output["action"] == "run":
             output["result"] = mynodes.run(**output["args"])
     return output</code></pre>
 </details>
 </dd>
+<dt id="connpy.ai.confirm"><code class="name flex">
+<span>def <span class="ident">confirm</span></span>(<span>self, user_input, max_retries=3, backoff_num=1)</span>
+</code></dt>
+<dd>
+<div class="desc"><p>Send the user input to openAI GPT and verify if response is affirmative or negative.</p>
+<h3 id="parameters">Parameters:</h3>
+<pre><code>- user_input (str): User response confirming or denying.
+</code></pre>
+<h3 id="optional-parameters">Optional Parameters:</h3>
+<pre><code>- max_retries (int): Maximum number of retries for gpt api.
+- backoff_num (int): Backoff factor for exponential wait time
+                     between retries.
+</code></pre>
+<h3 id="returns">Returns:</h3>
+<pre><code>bool or str: True, False or str if AI couldn't understand the response
+</code></pre></div>
+<details class="source">
+<summary>
+<span>Expand source code</span>
+</summary>
+<pre><code class="python">def confirm(self, user_input, max_retries=3, backoff_num=1):
+    '''
+    Send the user input to openAI GPT and verify if response is affirmative or negative.
+
+    ### Parameters:
+
+    - user_input (str): User response confirming or denying.
+
+    ### Optional Parameters:
+
+    - max_retries (int): Maximum number of retries for gpt api.
+    - backoff_num (int): Backoff factor for exponential wait time
+                         between retries.
+
+    ### Returns:
+
+    bool or str: True, False or str if AI couldn't understand the response
+    '''
+    result = self._retry_function(self._get_confirmation, max_retries, backoff_num, user_input)
+    if result:
+        output = result["response"]
+    else:
+        output = f"{self.model} api is not responding right now, please try again later."
+    return output</code></pre>
+</details>
+</dd>
 </dl>
 </dd>
 <dt id="connpy.configfile"><code class="flex name class">
@@ -4171,6 +4349,7 @@ tasks:
 <h4><code><a title="connpy.ai" href="#connpy.ai">ai</a></code></h4>
 <ul class="">
 <li><code><a title="connpy.ai.ask" href="#connpy.ai.ask">ask</a></code></li>
+<li><code><a title="connpy.ai.confirm" href="#connpy.ai.confirm">confirm</a></code></li>
 </ul>
 </li>
 <li>