This commit is contained in:
2025-07-11 22:31:42 -03:00
parent e2e4c9bfe7
commit c3f9f75f70
3 changed files with 127 additions and 91 deletions

View File

@@ -1,2 +1,2 @@
__version__ = "4.1.3" __version__ = "4.1.4"

View File

@@ -1,4 +1,4 @@
import openai from openai import OpenAI
import time import time
import json import json
import re import re
@@ -22,7 +22,7 @@ class ai:
''' '''
def __init__(self, config, org = None, api_key = None, model = None, temp = 0.7): def __init__(self, config, org = None, api_key = None, model = None):
''' '''
### Parameters: ### Parameters:
@@ -48,28 +48,24 @@ class ai:
''' '''
self.config = config self.config = config
if org:
openai.organization = org
else:
try: try:
openai.organization = self.config.config["openai"]["organization"] final_api_key = api_key if api_key else self.config.config["openai"]["api_key"]
except: except Exception:
raise ValueError("Missing openai organization")
if api_key:
openai.api_key = api_key
else:
try:
openai.api_key = self.config.config["openai"]["api_key"]
except:
raise ValueError("Missing openai api_key") raise ValueError("Missing openai api_key")
try:
final_org = org if org else self.config.config["openai"]["organization"]
except Exception:
raise ValueError("Missing openai organization")
self.client = OpenAI(api_key=final_api_key, organization=final_org)
if model: if model:
self.model = model self.model = model
else: else:
try: try:
self.model = self.config.config["openai"]["model"] self.model = self.config.config["openai"]["model"]
except: except:
self.model = "gpt-4o-mini" self.model = "o4-mini"
self.temp = temp
self.__prompt = {} self.__prompt = {}
self.__prompt["original_system"] = """ self.__prompt["original_system"] = """
You are the AI chatbot and assistant of a network connection manager and automation app called connpy. When provided with user input analyze the input and extract the following information. If user wants to chat just reply and don't call a function: You are the AI chatbot and assistant of a network connection manager and automation app called connpy. When provided with user input analyze the input and extract the following information. If user wants to chat just reply and don't call a function:
@@ -249,17 +245,22 @@ Categorize the user's request based on the operation they want to perform on the
message.append({"role": "assistant", "content": None, "function_call": self.__prompt["command_assistant"]}) message.append({"role": "assistant", "content": None, "function_call": self.__prompt["command_assistant"]})
message.append({"role": "user", "content": command_input}) message.append({"role": "user", "content": command_input})
functions = [command_function] functions = [command_function]
response = openai.ChatCompletion.create( response = self.client.chat.completions.create(
model=self.model, model=self.model,
messages=message, messages=message,
functions=functions, functions=functions,
function_call={"name": "get_commands"}, function_call={"name": "get_commands"},
temperature=self.temp
) )
output = {} output = {}
result = response["choices"][0]["message"].to_dict() msg = response.choices[0].message # This is a ChatCompletionMessage object
json_result = json.loads(result["function_call"]["arguments"])
# function_call may be None. Check it first.
if msg.function_call and msg.function_call.arguments:
json_result = json.loads(msg.function_call.arguments)
output["response"] = self._clean_command_response(json_result, node_list) output["response"] = self._clean_command_response(json_result, node_list)
else:
# Error handling or fallback, depending on your logic
output["response"] = None
return output return output
@MethodHook @MethodHook
@@ -274,32 +275,45 @@ Categorize the user's request based on the operation they want to perform on the
chat_history = [] chat_history = []
chat_history.append({"role": "user", "content": user_input}) chat_history.append({"role": "user", "content": user_input})
message.extend(chat_history) message.extend(chat_history)
response = openai.ChatCompletion.create( response = self.client.chat.completions.create(
model=self.model, model=self.model,
messages=message, messages=message,
functions=functions, functions=functions,
function_call="auto", function_call="auto",
temperature=self.temp,
top_p=1 top_p=1
) )
def extract_quoted_strings(text): def extract_quoted_strings(text):
pattern = r'["\'](.*?)["\']' pattern = r'["\'](.*?)["\']'
matches = re.findall(pattern, text) matches = re.findall(pattern, text)
return matches return matches
expected = extract_quoted_strings(user_input) expected = extract_quoted_strings(user_input)
output = {} output = {}
result = response["choices"][0]["message"].to_dict() msg = response.choices[0].message # ChatCompletionMessage object
if result["content"]:
if msg.content: # If the model returned free-form text (the "no app-related" case)
output["app_related"] = False output["app_related"] = False
chat_history.append({"role": "assistant", "content": result["content"]}) chat_history.append({"role": "assistant", "content": msg.content})
output["response"] = result["content"] output["response"] = msg.content
else: else:
json_result = json.loads(result["function_call"]["arguments"]) # If there is a function_call, it is app-related
if msg.function_call and msg.function_call.arguments:
json_result = json.loads(msg.function_call.arguments)
output["app_related"] = True output["app_related"] = True
output["filter"] = json_result["filter"] output["filter"] = json_result["filter"]
output["type"] = json_result["type"] output["type"] = json_result["type"]
chat_history.append({"role": "assistant", "content": result["content"], "function_call": {"name": result["function_call"]["name"], "arguments": json.dumps(json_result)}}) chat_history.append({
"role": "assistant",
"content": msg.content,
"function_call": {
"name": msg.function_call.name,
"arguments": json.dumps(json_result)
}
})
else:
# Defensive fallback if nothing is present
output["app_related"] = False
output["response"] = None
output["expected"] = expected output["expected"] = expected
output["chat_history"] = chat_history output["chat_history"] = chat_history
return output return output
@@ -310,23 +324,27 @@ Categorize the user's request based on the operation they want to perform on the
message = [] message = []
message.append({"role": "user", "content": user_input}) message.append({"role": "user", "content": user_input})
functions = [self.__prompt["confirmation_function"]] functions = [self.__prompt["confirmation_function"]]
response = openai.ChatCompletion.create( response = self.client.chat.completions.create(
model=self.model, model=self.model,
messages=message, messages=message,
functions=functions, functions=functions,
function_call={"name": "get_confirmation"}, function_call={"name": "get_confirmation"},
temperature=self.temp,
top_p=1 top_p=1
) )
result = response["choices"][0]["message"].to_dict() msg = response.choices[0].message # This is a ChatCompletionMessage object
json_result = json.loads(result["function_call"]["arguments"])
output = {} output = {}
if msg.function_call and msg.function_call.arguments:
json_result = json.loads(msg.function_call.arguments)
if json_result["result"] == "true": if json_result["result"] == "true":
output["result"] = True output["result"] = True
elif json_result["result"] == "false": elif json_result["result"] == "false":
output["result"] = False output["result"] = False
elif json_result["result"] == "none": elif json_result["result"] == "none":
output["result"] = json_result["response"] output["result"] = json_result.get("response") # .get avoids a KeyError if the key is missing
else:
output["result"] = None # Or whatever value makes sense for your case
return output return output
@MethodHook @MethodHook

View File

@@ -722,7 +722,7 @@ indicating successful verification.</p>
</dd> </dd>
<dt id="connpy.ai"><code class="flex name class"> <dt id="connpy.ai"><code class="flex name class">
<span>class <span class="ident">ai</span></span> <span>class <span class="ident">ai</span></span>
<span>(</span><span>config, org=None, api_key=None, model=None, temp=0.7)</span> <span>(</span><span>config, org=None, api_key=None, model=None)</span>
</code></dt> </code></dt>
<dd> <dd>
<details class="source"> <details class="source">
@@ -743,7 +743,7 @@ class ai:
&#39;&#39;&#39; &#39;&#39;&#39;
def __init__(self, config, org = None, api_key = None, model = None, temp = 0.7): def __init__(self, config, org = None, api_key = None, model = None):
&#39;&#39;&#39; &#39;&#39;&#39;
### Parameters: ### Parameters:
@@ -769,28 +769,24 @@ class ai:
&#39;&#39;&#39; &#39;&#39;&#39;
self.config = config self.config = config
if org:
openai.organization = org
else:
try: try:
openai.organization = self.config.config[&#34;openai&#34;][&#34;organization&#34;] final_api_key = api_key if api_key else self.config.config[&#34;openai&#34;][&#34;api_key&#34;]
except: except Exception:
raise ValueError(&#34;Missing openai organization&#34;)
if api_key:
openai.api_key = api_key
else:
try:
openai.api_key = self.config.config[&#34;openai&#34;][&#34;api_key&#34;]
except:
raise ValueError(&#34;Missing openai api_key&#34;) raise ValueError(&#34;Missing openai api_key&#34;)
try:
final_org = org if org else self.config.config[&#34;openai&#34;][&#34;organization&#34;]
except Exception:
raise ValueError(&#34;Missing openai organization&#34;)
self.client = OpenAI(api_key=final_api_key, organization=final_org)
if model: if model:
self.model = model self.model = model
else: else:
try: try:
self.model = self.config.config[&#34;openai&#34;][&#34;model&#34;] self.model = self.config.config[&#34;openai&#34;][&#34;model&#34;]
except: except:
self.model = &#34;gpt-4o-mini&#34; self.model = &#34;o4-mini&#34;
self.temp = temp
self.__prompt = {} self.__prompt = {}
self.__prompt[&#34;original_system&#34;] = &#34;&#34;&#34; self.__prompt[&#34;original_system&#34;] = &#34;&#34;&#34;
You are the AI chatbot and assistant of a network connection manager and automation app called connpy. When provided with user input analyze the input and extract the following information. If user wants to chat just reply and don&#39;t call a function: You are the AI chatbot and assistant of a network connection manager and automation app called connpy. When provided with user input analyze the input and extract the following information. If user wants to chat just reply and don&#39;t call a function:
@@ -970,17 +966,22 @@ Categorize the user&#39;s request based on the operation they want to perform on
message.append({&#34;role&#34;: &#34;assistant&#34;, &#34;content&#34;: None, &#34;function_call&#34;: self.__prompt[&#34;command_assistant&#34;]}) message.append({&#34;role&#34;: &#34;assistant&#34;, &#34;content&#34;: None, &#34;function_call&#34;: self.__prompt[&#34;command_assistant&#34;]})
message.append({&#34;role&#34;: &#34;user&#34;, &#34;content&#34;: command_input}) message.append({&#34;role&#34;: &#34;user&#34;, &#34;content&#34;: command_input})
functions = [command_function] functions = [command_function]
response = openai.ChatCompletion.create( response = self.client.chat.completions.create(
model=self.model, model=self.model,
messages=message, messages=message,
functions=functions, functions=functions,
function_call={&#34;name&#34;: &#34;get_commands&#34;}, function_call={&#34;name&#34;: &#34;get_commands&#34;},
temperature=self.temp
) )
output = {} output = {}
result = response[&#34;choices&#34;][0][&#34;message&#34;].to_dict() msg = response.choices[0].message # This is a ChatCompletionMessage object
json_result = json.loads(result[&#34;function_call&#34;][&#34;arguments&#34;])
# function_call may be None. Check it first.
if msg.function_call and msg.function_call.arguments:
json_result = json.loads(msg.function_call.arguments)
output[&#34;response&#34;] = self._clean_command_response(json_result, node_list) output[&#34;response&#34;] = self._clean_command_response(json_result, node_list)
else:
# Error handling or fallback, depending on your logic
output[&#34;response&#34;] = None
return output return output
@MethodHook @MethodHook
@@ -995,32 +996,45 @@ Categorize the user&#39;s request based on the operation they want to perform on
chat_history = [] chat_history = []
chat_history.append({&#34;role&#34;: &#34;user&#34;, &#34;content&#34;: user_input}) chat_history.append({&#34;role&#34;: &#34;user&#34;, &#34;content&#34;: user_input})
message.extend(chat_history) message.extend(chat_history)
response = openai.ChatCompletion.create( response = self.client.chat.completions.create(
model=self.model, model=self.model,
messages=message, messages=message,
functions=functions, functions=functions,
function_call=&#34;auto&#34;, function_call=&#34;auto&#34;,
temperature=self.temp,
top_p=1 top_p=1
) )
def extract_quoted_strings(text): def extract_quoted_strings(text):
pattern = r&#39;[&#34;\&#39;](.*?)[&#34;\&#39;]&#39; pattern = r&#39;[&#34;\&#39;](.*?)[&#34;\&#39;]&#39;
matches = re.findall(pattern, text) matches = re.findall(pattern, text)
return matches return matches
expected = extract_quoted_strings(user_input) expected = extract_quoted_strings(user_input)
output = {} output = {}
result = response[&#34;choices&#34;][0][&#34;message&#34;].to_dict() msg = response.choices[0].message # ChatCompletionMessage object
if result[&#34;content&#34;]:
if msg.content: # If the model returned free-form text (the &#34;no app-related&#34; case)
output[&#34;app_related&#34;] = False output[&#34;app_related&#34;] = False
chat_history.append({&#34;role&#34;: &#34;assistant&#34;, &#34;content&#34;: result[&#34;content&#34;]}) chat_history.append({&#34;role&#34;: &#34;assistant&#34;, &#34;content&#34;: msg.content})
output[&#34;response&#34;] = result[&#34;content&#34;] output[&#34;response&#34;] = msg.content
else: else:
json_result = json.loads(result[&#34;function_call&#34;][&#34;arguments&#34;]) # If there is a function_call, it is app-related
if msg.function_call and msg.function_call.arguments:
json_result = json.loads(msg.function_call.arguments)
output[&#34;app_related&#34;] = True output[&#34;app_related&#34;] = True
output[&#34;filter&#34;] = json_result[&#34;filter&#34;] output[&#34;filter&#34;] = json_result[&#34;filter&#34;]
output[&#34;type&#34;] = json_result[&#34;type&#34;] output[&#34;type&#34;] = json_result[&#34;type&#34;]
chat_history.append({&#34;role&#34;: &#34;assistant&#34;, &#34;content&#34;: result[&#34;content&#34;], &#34;function_call&#34;: {&#34;name&#34;: result[&#34;function_call&#34;][&#34;name&#34;], &#34;arguments&#34;: json.dumps(json_result)}}) chat_history.append({
&#34;role&#34;: &#34;assistant&#34;,
&#34;content&#34;: msg.content,
&#34;function_call&#34;: {
&#34;name&#34;: msg.function_call.name,
&#34;arguments&#34;: json.dumps(json_result)
}
})
else:
# Defensive fallback if nothing is present
output[&#34;app_related&#34;] = False
output[&#34;response&#34;] = None
output[&#34;expected&#34;] = expected output[&#34;expected&#34;] = expected
output[&#34;chat_history&#34;] = chat_history output[&#34;chat_history&#34;] = chat_history
return output return output
@@ -1031,23 +1045,27 @@ Categorize the user&#39;s request based on the operation they want to perform on
message = [] message = []
message.append({&#34;role&#34;: &#34;user&#34;, &#34;content&#34;: user_input}) message.append({&#34;role&#34;: &#34;user&#34;, &#34;content&#34;: user_input})
functions = [self.__prompt[&#34;confirmation_function&#34;]] functions = [self.__prompt[&#34;confirmation_function&#34;]]
response = openai.ChatCompletion.create( response = self.client.chat.completions.create(
model=self.model, model=self.model,
messages=message, messages=message,
functions=functions, functions=functions,
function_call={&#34;name&#34;: &#34;get_confirmation&#34;}, function_call={&#34;name&#34;: &#34;get_confirmation&#34;},
temperature=self.temp,
top_p=1 top_p=1
) )
result = response[&#34;choices&#34;][0][&#34;message&#34;].to_dict() msg = response.choices[0].message # This is a ChatCompletionMessage object
json_result = json.loads(result[&#34;function_call&#34;][&#34;arguments&#34;])
output = {} output = {}
if msg.function_call and msg.function_call.arguments:
json_result = json.loads(msg.function_call.arguments)
if json_result[&#34;result&#34;] == &#34;true&#34;: if json_result[&#34;result&#34;] == &#34;true&#34;:
output[&#34;result&#34;] = True output[&#34;result&#34;] = True
elif json_result[&#34;result&#34;] == &#34;false&#34;: elif json_result[&#34;result&#34;] == &#34;false&#34;:
output[&#34;result&#34;] = False output[&#34;result&#34;] = False
elif json_result[&#34;result&#34;] == &#34;none&#34;: elif json_result[&#34;result&#34;] == &#34;none&#34;:
output[&#34;result&#34;] = json_result[&#34;response&#34;] output[&#34;result&#34;] = json_result.get(&#34;response&#34;) # .get avoids a KeyError if the key is missing
else:
output[&#34;result&#34;] = None # Or whatever value makes sense for your case
return output return output
@MethodHook @MethodHook