add history in ai chat

fluzzi 2023-05-10 12:34:19 -03:00
parent a74d055993
commit 0e34ea79c6
4 changed files with 50 additions and 20 deletions

View File

@@ -1,2 +1,2 @@
-__version__ = "3.2.1"
+__version__ = "3.2.2"

View File

@@ -164,8 +164,10 @@ class ai:
 key, value = line.split(":", 1)
 key = key.strip()
 newvalue = {}
+pattern = r'\[.*?\]'
+match = re.search(pattern, value.strip())
 try:
-    value = ast.literal_eval(value.strip())
+    value = ast.literal_eval(match.group(0))
     for i,e in enumerate(value, start=1):
         newvalue[f"command{i}"] = e
         if f"{{command{i}}}" not in info_dict["commands"]:
@@ -205,14 +207,16 @@ class ai:
     output["response"] = self._clean_command_response(output["raw_response"])
     return output
 
-def _get_filter(self, user_input):
+def _get_filter(self, user_input, chat_history = None):
     #Send the request to identify the filter and other attributes from the user input to GPT.
     message = []
     message.append({"role": "system", "content": dedent(self.__prompt["original_system"])})
     message.append({"role": "user", "content": dedent(self.__prompt["original_user"])})
     message.append({"role": "assistant", "content": dedent(self.__prompt["original_assistant"])})
-    message.append({"role": "user", "content": user_input})
+    if not chat_history:
+        chat_history = []
+    chat_history.append({"role": "user", "content": user_input})
+    message.extend(chat_history)
     response = openai.ChatCompletion.create(
         model=self.model,
         messages=message,
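
Note: with this change the three priming prompts stay fixed and the rolling chat_history is appended after them, ending with the new user input. The messages list handed to openai.ChatCompletion.create ends up shaped roughly like this (prompt and reply texts abbreviated; a sketch, not literal content):

message = [
    {"role": "system",    "content": "<original_system prompt>"},
    {"role": "user",      "content": "<original_user prompt>"},
    {"role": "assistant", "content": "<original_assistant prompt>"},
    # ...followed by every prior turn, oldest first, ending with the new input:
    {"role": "user",      "content": "list all routers in site1"},
    {"role": "assistant", "content": "<previous model reply>"},
    {"role": "user",      "content": "now only the ones in rack 3"},
]
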
@@ -223,11 +227,13 @@ class ai:
     output = {}
     output["dict_response"] = response
     output["raw_response"] = response["choices"][0]["message"]["content"]
+    chat_history.append({"role": "assistant", "content": output["raw_response"]})
+    output["chat_history"] = chat_history
     clear_response = self._clean_original_response(output["raw_response"])
     output["response"] = self._clean_original_response(output["raw_response"])
     return output
 
-def ask(self, user_input, dryrun = False):
+def ask(self, user_input, dryrun = False, chat_history = None):
     '''
     Send the user input to openAI GPT and parse the response to run an action in the application.
@@ -266,14 +272,18 @@ class ai:
       on the nodes.
     - result: A dictionary with the output of the commands or
       the test.
+    - chat_history: The chat history between user and chatbot.
+      It can be used as an attribute for next request.
     '''
     output = {}
-    original = self._get_filter(user_input)
+    original = self._get_filter(user_input, chat_history)
     output["input"] = user_input
     output["app_related"] = original["response"]["app_related"]
     output["dryrun"] = dryrun
+    output["chat_history"] = original["chat_history"]
     if not output["app_related"]:
         output["response"] = original["response"]["response"]
     else:
@@ -289,7 +299,7 @@ class ai:
     thisnodes = self.config._getallnodesfull(output["filter"])
     output["nodes"] = list(thisnodes.keys())
     if not type == "command":
-        output["action"] = type
+        output["action"] = "list_nodes"
     else:
         commands = self._get_commands(user_input, thisnodes)
         output["args"] = {}

View File

@@ -63,8 +63,12 @@ def ask_ai():
         dryrun = data["dryrun"]
     else:
         dryrun = False
+    if "chat_history" in data:
+        chat_history = data["chat_history"]
+    else:
+        chat_history = None
     ai = myai(conf)
-    return ai.ask(input, dryrun)
+    return ai.ask(input, dryrun, chat_history)
 
 @app.route("/run_commands", methods=["POST"])

View File

@@ -730,8 +730,10 @@ __pdoc__ = {
 key, value = line.split(":", 1)
 key = key.strip()
 newvalue = {}
+pattern = r'\[.*?\]'
+match = re.search(pattern, value.strip())
 try:
-    value = ast.literal_eval(value.strip())
+    value = ast.literal_eval(match.group(0))
     for i,e in enumerate(value, start=1):
         newvalue[f"command{i}"] = e
         if f"{{command{i}}}" not in info_dict["commands"]:
@@ -771,14 +773,16 @@ __pdoc__ = {
     output["response"] = self._clean_command_response(output["raw_response"])
     return output
 
-def _get_filter(self, user_input):
+def _get_filter(self, user_input, chat_history = None):
     #Send the request to identify the filter and other attributes from the user input to GPT.
     message = []
     message.append({"role": "system", "content": dedent(self.__prompt["original_system"])})
     message.append({"role": "user", "content": dedent(self.__prompt["original_user"])})
     message.append({"role": "assistant", "content": dedent(self.__prompt["original_assistant"])})
-    message.append({"role": "user", "content": user_input})
+    if not chat_history:
+        chat_history = []
+    chat_history.append({"role": "user", "content": user_input})
+    message.extend(chat_history)
     response = openai.ChatCompletion.create(
         model=self.model,
         messages=message,
@@ -789,11 +793,13 @@ __pdoc__ = {
     output = {}
     output["dict_response"] = response
     output["raw_response"] = response["choices"][0]["message"]["content"]
+    chat_history.append({"role": "assistant", "content": output["raw_response"]})
+    output["chat_history"] = chat_history
     clear_response = self._clean_original_response(output["raw_response"])
     output["response"] = self._clean_original_response(output["raw_response"])
     return output
 
-def ask(self, user_input, dryrun = False):
+def ask(self, user_input, dryrun = False, chat_history = None):
     '''
     Send the user input to openAI GPT and parse the response to run an action in the application.
@@ -832,14 +838,18 @@ __pdoc__ = {
       on the nodes.
     - result: A dictionary with the output of the commands or
       the test.
+    - chat_history: The chat history between user and chatbot.
+      It can be used as an attribute for next request.
     '''
     output = {}
-    original = self._get_filter(user_input)
+    original = self._get_filter(user_input, chat_history)
     output["input"] = user_input
     output["app_related"] = original["response"]["app_related"]
     output["dryrun"] = dryrun
+    output["chat_history"] = original["chat_history"]
     if not output["app_related"]:
         output["response"] = original["response"]["response"]
     else:
@@ -855,7 +865,7 @@ __pdoc__ = {
     thisnodes = self.config._getallnodesfull(output["filter"])
     output["nodes"] = list(thisnodes.keys())
     if not type == "command":
-        output["action"] = type
+        output["action"] = "list_nodes"
     else:
         commands = self._get_commands(user_input, thisnodes)
         output["args"] = {}
@@ -877,7 +887,7 @@ __pdoc__ = {
 <h3>Methods</h3>
 <dl>
 <dt id="connpy.ai.ask"><code class="name flex">
-<span>def <span class="ident">ask</span></span>(<span>self, user_input, dryrun=False)</span>
+<span>def <span class="ident">ask</span></span>(<span>self, user_input, dryrun=False, chat_history=None)</span>
 </code></dt>
 <dd>
 <div class="desc"><p>Send the user input to openAI GPT and parse the response to run an action in the application.</p>
@@ -913,12 +923,14 @@ __pdoc__ = {
   on the nodes.
 - result: A dictionary with the output of the commands or
   the test.
+- chat_history: The chat history between user and chatbot.
+  It can be used as an attribute for next request.
 </code></pre></div>
 <details class="source">
 <summary>
 <span>Expand source code</span>
 </summary>
-<pre><code class="python">def ask(self, user_input, dryrun = False):
+<pre><code class="python">def ask(self, user_input, dryrun = False, chat_history = None):
 &#39;&#39;&#39;
 Send the user input to openAI GPT and parse the response to run an action in the application.
@@ -957,14 +969,18 @@ __pdoc__ = {
   on the nodes.
 - result: A dictionary with the output of the commands or
   the test.
+- chat_history: The chat history between user and chatbot.
+  It can be used as an attribute for next request.
 &#39;&#39;&#39;
 output = {}
-original = self._get_filter(user_input)
+original = self._get_filter(user_input, chat_history)
 output[&#34;input&#34;] = user_input
 output[&#34;app_related&#34;] = original[&#34;response&#34;][&#34;app_related&#34;]
 output[&#34;dryrun&#34;] = dryrun
+output[&#34;chat_history&#34;] = original[&#34;chat_history&#34;]
 if not output[&#34;app_related&#34;]:
     output[&#34;response&#34;] = original[&#34;response&#34;][&#34;response&#34;]
 else:
@@ -980,7 +996,7 @@ __pdoc__ = {
 thisnodes = self.config._getallnodesfull(output[&#34;filter&#34;])
 output[&#34;nodes&#34;] = list(thisnodes.keys())
 if not type == &#34;command&#34;:
-    output[&#34;action&#34;] = type
+    output[&#34;action&#34;] = &#34;list_nodes&#34;
 else:
     commands = self._get_commands(user_input, thisnodes)
     output[&#34;args&#34;] = {}