Add history in AI chat
		| @@ -1,2 +1,2 @@ | ||||
| __version__ = "3.2.1" | ||||
| __version__ = "3.2.2" | ||||
|  | ||||
|   | ||||
							
								
								
									
connpy/ai.py (24 changes)
							| @@ -164,8 +164,10 @@ class ai: | ||||
|                 key, value = line.split(":", 1) | ||||
|                 key = key.strip() | ||||
|                 newvalue = {} | ||||
|                 pattern = r'\[.*?\]' | ||||
|                 match = re.search(pattern, value.strip()) | ||||
|                 try: | ||||
|                     value = ast.literal_eval(value.strip()) | ||||
|                     value = ast.literal_eval(match.group(0)) | ||||
|                     for i,e in enumerate(value, start=1): | ||||
|                         newvalue[f"command{i}"] = e | ||||
|                         if f"{{command{i}}}" not in info_dict["commands"]: | ||||
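
The change above swaps a direct ast.literal_eval of the model's value for a two-step extraction: a regex first isolates the bracketed list, so extra prose GPT wraps around it no longer breaks parsing. A minimal, self-contained sketch of that step (the sample response string is invented for illustration):

    import ast
    import re

    # Invented example of a model reply that wraps the list in extra prose.
    value = 'Sure! Commands: ["show version", "show ip interface brief"]'

    pattern = r'\[.*?\]'                      # same pattern as in the commit
    match = re.search(pattern, value.strip())

    newvalue = {}
    if match:  # the commit calls match.group(0) directly; guarded here for the sketch
        for i, e in enumerate(ast.literal_eval(match.group(0)), start=1):
            newvalue[f"command{i}"] = e

    print(newvalue)
    # {'command1': 'show version', 'command2': 'show ip interface brief'}
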
| @@ -205,14 +207,16 @@ class ai: | ||||
|         output["response"] = self._clean_command_response(output["raw_response"]) | ||||
|         return output | ||||
|  | ||||
|     def _get_filter(self, user_input): | ||||
|     def _get_filter(self, user_input, chat_history = None): | ||||
|         #Send the request to identify the filter and other attributes from the user input to GPT. | ||||
|         message = [] | ||||
|         message.append({"role": "system", "content": dedent(self.__prompt["original_system"])}) | ||||
|         message.append({"role": "user", "content": dedent(self.__prompt["original_user"])}) | ||||
|         message.append({"role": "assistant", "content": dedent(self.__prompt["original_assistant"])}) | ||||
|         message.append({"role": "user", "content": user_input}) | ||||
|  | ||||
|         if not chat_history: | ||||
|             chat_history = [] | ||||
|         chat_history.append({"role": "user", "content": user_input}) | ||||
|         message.extend(chat_history) | ||||
|         response = openai.ChatCompletion.create( | ||||
|             model=self.model, | ||||
|             messages=message, | ||||
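
For readers skimming the hunk: with a history supplied, the prompt scaffold stays in front and the prior turns plus the new input are appended behind it. A rough sketch of the resulting messages list (the prompt texts and turns are placeholders, not the real self.__prompt contents):

    # Placeholder illustration of what _get_filter() now sends to the model.
    prior = [
        {"role": "user", "content": "list my office nodes"},
        {"role": "assistant", "content": "...previous raw response..."},
    ]
    message = [
        {"role": "system", "content": "<original_system prompt>"},
        {"role": "user", "content": "<original_user prompt>"},
        {"role": "assistant", "content": "<original_assistant prompt>"},
    ]
    prior.append({"role": "user", "content": "only the cisco ones"})
    message.extend(prior)
    # message now ends with the earlier turns followed by the new question,
    # mirroring chat_history.append(...) + message.extend(chat_history) above.
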
| @@ -223,11 +227,13 @@ class ai: | ||||
|         output = {} | ||||
|         output["dict_response"] = response | ||||
|         output["raw_response"] = response["choices"][0]["message"]["content"]  | ||||
|         chat_history.append({"role": "assistant", "content": output["raw_response"]}) | ||||
|         output["chat_history"] = chat_history | ||||
|         clear_response = self._clean_original_response(output["raw_response"]) | ||||
|         output["response"] = self._clean_original_response(output["raw_response"]) | ||||
|         return output | ||||
|          | ||||
|     def ask(self, user_input, dryrun = False): | ||||
|     def ask(self, user_input, dryrun = False, chat_history = None): | ||||
|         ''' | ||||
|         Send the user input to openAI GPT and parse the response to run an action in the application. | ||||
|  | ||||
| @@ -266,14 +272,18 @@ class ai: | ||||
|                     on the nodes. | ||||
|                   - result: A dictionary with the output of the commands or  | ||||
|                     the test. | ||||
|                   - chat_history: The chat history between user and chatbot. | ||||
|                     It can be used as an attribute for next request. | ||||
|                  | ||||
|                      | ||||
|  | ||||
|         ''' | ||||
|         output = {} | ||||
|         original = self._get_filter(user_input) | ||||
|         original = self._get_filter(user_input, chat_history) | ||||
|         output["input"] = user_input | ||||
|         output["app_related"] = original["response"]["app_related"] | ||||
|         output["dryrun"] = dryrun | ||||
|         output["chat_history"] = original["chat_history"] | ||||
|         if not output["app_related"]: | ||||
|             output["response"] = original["response"]["response"] | ||||
|         else: | ||||
| @@ -289,7 +299,7 @@ class ai: | ||||
|                     thisnodes = self.config._getallnodesfull(output["filter"]) | ||||
|                     output["nodes"] = list(thisnodes.keys()) | ||||
|             if not type == "command": | ||||
|                 output["action"] = type | ||||
|                 output["action"] = "list_nodes" | ||||
|             else: | ||||
|                 commands = self._get_commands(user_input, thisnodes) | ||||
|                 output["args"] = {} | ||||
|   | ||||
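
Taken together, the ai.py changes let a caller carry the conversation across requests: ask() now accepts a chat_history and returns the updated history. A usage sketch follows; the import and config constructor names are assumptions, only ask()'s signature comes from the diff:

    from connpy import ai, configfile   # assumed import names; adjust to your install

    conf = configfile()                  # assumed default config loading
    bot = ai(conf)                       # the class shown in the diff above

    first = bot.ask("list my nodes in the office location", dryrun=True)
    follow_up = bot.ask("now only the ones running cisco ios",
                        dryrun=True,
                        chat_history=first["chat_history"])

    # _get_filter() appends to the list it receives, so the history passed in is
    # extended in place and also returned as follow_up["chat_history"].
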
| @@ -63,8 +63,12 @@ def ask_ai(): | ||||
|         dryrun = data["dryrun"]  | ||||
|     else: | ||||
|         dryrun = False | ||||
|     if "chat_history" in data: | ||||
|         chat_history = data["chat_history"] | ||||
|     else: | ||||
|         chat_history = None | ||||
|     ai = myai(conf) | ||||
|     return ai.ask(input, dryrun) | ||||
|     return ai.ask(input, dryrun, chat_history) | ||||
|  | ||||
|  | ||||
| @app.route("/run_commands", methods=["POST"]) | ||||
|   | ||||
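
On the API side, the route now forwards an optional chat_history from the request body. A client-side sketch of a two-turn exchange; the base URL, route path, and the "input" key are assumptions, since only the dryrun/chat_history handling is visible in this hunk:

    import requests

    BASE = "http://localhost:8048"   # placeholder address for the connpy API
    payload = {"input": "restart the lab switches", "dryrun": True}
    first = requests.post(f"{BASE}/ask_ai", json=payload).json()

    # Send the returned history back so the follow-up keeps the conversation.
    payload["input"] = "and the lab routers too"
    payload["chat_history"] = first.get("chat_history")
    second = requests.post(f"{BASE}/ask_ai", json=payload).json()
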
| @@ -730,8 +730,10 @@ __pdoc__ = { | ||||
|                 key, value = line.split(":", 1) | ||||
|                 key = key.strip() | ||||
|                 newvalue = {} | ||||
|                 pattern = r'\[.*?\]' | ||||
|                 match = re.search(pattern, value.strip()) | ||||
|                 try: | ||||
|                     value = ast.literal_eval(value.strip()) | ||||
|                     value = ast.literal_eval(match.group(0)) | ||||
|                     for i,e in enumerate(value, start=1): | ||||
|                         newvalue[f"command{i}"] = e | ||||
|                         if f"{{command{i}}}" not in info_dict["commands"]: | ||||
| @@ -771,14 +773,16 @@ __pdoc__ = { | ||||
|         output["response"] = self._clean_command_response(output["raw_response"]) | ||||
|         return output | ||||
|  | ||||
|     def _get_filter(self, user_input): | ||||
|     def _get_filter(self, user_input, chat_history = None): | ||||
|         #Send the request to identify the filter and other attributes from the user input to GPT. | ||||
|         message = [] | ||||
|         message.append({"role": "system", "content": dedent(self.__prompt["original_system"])}) | ||||
|         message.append({"role": "user", "content": dedent(self.__prompt["original_user"])}) | ||||
|         message.append({"role": "assistant", "content": dedent(self.__prompt["original_assistant"])}) | ||||
|         message.append({"role": "user", "content": user_input}) | ||||
|  | ||||
|         if not chat_history: | ||||
|             chat_history = [] | ||||
|         chat_history.append({"role": "user", "content": user_input}) | ||||
|         message.extend(chat_history) | ||||
|         response = openai.ChatCompletion.create( | ||||
|             model=self.model, | ||||
|             messages=message, | ||||
| @@ -789,11 +793,13 @@ __pdoc__ = { | ||||
|         output = {} | ||||
|         output["dict_response"] = response | ||||
|         output["raw_response"] = response["choices"][0]["message"]["content"]  | ||||
|         chat_history.append({"role": "assistant", "content": output["raw_response"]}) | ||||
|         output["chat_history"] = chat_history | ||||
|         clear_response = self._clean_original_response(output["raw_response"]) | ||||
|         output["response"] = self._clean_original_response(output["raw_response"]) | ||||
|         return output | ||||
|          | ||||
|     def ask(self, user_input, dryrun = False): | ||||
|     def ask(self, user_input, dryrun = False, chat_history = None): | ||||
|         ''' | ||||
|         Send the user input to openAI GPT and parse the response to run an action in the application. | ||||
|  | ||||
| @@ -832,14 +838,18 @@ __pdoc__ = { | ||||
|                     on the nodes. | ||||
|                   - result: A dictionary with the output of the commands or  | ||||
|                     the test. | ||||
|                   - chat_history: The chat history between user and chatbot. | ||||
|                     It can be used as an attribute for next request. | ||||
|                  | ||||
|                      | ||||
|  | ||||
|         ''' | ||||
|         output = {} | ||||
|         original = self._get_filter(user_input) | ||||
|         original = self._get_filter(user_input, chat_history) | ||||
|         output["input"] = user_input | ||||
|         output["app_related"] = original["response"]["app_related"] | ||||
|         output["dryrun"] = dryrun | ||||
|         output["chat_history"] = original["chat_history"] | ||||
|         if not output["app_related"]: | ||||
|             output["response"] = original["response"]["response"] | ||||
|         else: | ||||
| @@ -855,7 +865,7 @@ __pdoc__ = { | ||||
|                     thisnodes = self.config._getallnodesfull(output["filter"]) | ||||
|                     output["nodes"] = list(thisnodes.keys()) | ||||
|             if not type == "command": | ||||
|                 output["action"] = type | ||||
|                 output["action"] = "list_nodes" | ||||
|             else: | ||||
|                 commands = self._get_commands(user_input, thisnodes) | ||||
|                 output["args"] = {} | ||||
| @@ -877,7 +887,7 @@ __pdoc__ = { | ||||
| <h3>Methods</h3> | ||||
| <dl> | ||||
| <dt id="connpy.ai.ask"><code class="name flex"> | ||||
| <span>def <span class="ident">ask</span></span>(<span>self, user_input, dryrun=False)</span> | ||||
| <span>def <span class="ident">ask</span></span>(<span>self, user_input, dryrun=False, chat_history=None)</span> | ||||
| </code></dt> | ||||
| <dd> | ||||
| <div class="desc"><p>Send the user input to openAI GPT and parse the response to run an action in the application.</p> | ||||
| @@ -913,12 +923,14 @@ __pdoc__ = { | ||||
|         on the nodes. | ||||
|       - result: A dictionary with the output of the commands or  | ||||
|         the test. | ||||
|       - chat_history: The chat history between user and chatbot. | ||||
|         It can be used as an attribute for next request. | ||||
| </code></pre></div> | ||||
| <details class="source"> | ||||
| <summary> | ||||
| <span>Expand source code</span> | ||||
| </summary> | ||||
| <pre><code class="python">def ask(self, user_input, dryrun = False): | ||||
| <pre><code class="python">def ask(self, user_input, dryrun = False, chat_history = None): | ||||
|     ''' | ||||
|     Send the user input to openAI GPT and parse the response to run an action in the application. | ||||
|  | ||||
| @@ -957,14 +969,18 @@ __pdoc__ = { | ||||
|                 on the nodes. | ||||
|               - result: A dictionary with the output of the commands or  | ||||
|                 the test. | ||||
|               - chat_history: The chat history between user and chatbot. | ||||
|                 It can be used as an attribute for next request. | ||||
|              | ||||
|                  | ||||
|  | ||||
|     ''' | ||||
|     output = {} | ||||
|     original = self._get_filter(user_input) | ||||
|     original = self._get_filter(user_input, chat_history) | ||||
|     output["input"] = user_input | ||||
|     output["app_related"] = original["response"]["app_related"] | ||||
|     output["dryrun"] = dryrun | ||||
|     output["chat_history"] = original["chat_history"] | ||||
|     if not output["app_related"]: | ||||
|         output["response"] = original["response"]["response"] | ||||
|     else: | ||||
| @@ -980,7 +996,7 @@ __pdoc__ = { | ||||
|                 thisnodes = self.config._getallnodesfull(output["filter"]) | ||||
|                 output["nodes"] = list(thisnodes.keys()) | ||||
|         if not type == "command": | ||||
|             output["action"] = type | ||||
|             output["action"] = "list_nodes" | ||||
|         else: | ||||
|             commands = self._get_commands(user_input, thisnodes) | ||||
|             output["args"] = {} | ||||
|   | ||||
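
For reference, the documented return value now includes the history. A rough, invented example of what ask() returns for a question that is not app related (only keys visible in this diff are shown):

    example = {
        "input": "what is BGP?",
        "app_related": False,
        "dryrun": False,
        "chat_history": [
            {"role": "user", "content": "what is BGP?"},
            {"role": "assistant", "content": "...raw GPT answer..."},
        ],
        "response": "...cleaned GPT answer...",
    }
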