AssistantAgent

Bases: WiseAgent

This utility agent starts a web interface and passes the user input to another agent. The web interface will be running at http://127.0.0.1:7860

Source code in wiseagents/agents/assistant.py
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
class AssistantAgent(WiseAgent):
    """
    This utility agent starts a web chat interface (Gradio) and passes each user input to
    another agent, returning that agent's response to the client.
    The web interface will be running at http://127.0.0.1:7860
    """
    yaml_tag = u'!wiseagents.agents.AssistantAgent'

    # NOTE(review): these are class-level attributes and are therefore shared by all
    # AssistantAgent instances in the process — confirm a single instance is intended.
    _response_delivery = None  # optional callable used to deliver responses to the client
    _cond = threading.Condition()  # pairs slow_echo (waiter) with process_response (notifier)
    _response : WiseAgentMessage = None  # last response received from the destination agent
    _ctx = None  # name of the chat context created in start_agent

    def __new__(cls, *args, **kwargs):
        """Create a new instance of the class, setting default values for the optional instance variables."""
        obj = super().__new__(cls)
        return obj

    def __init__(self, name: str, metadata: WiseAgentMetaData , transport: WiseAgentTransport,
                 destination_agent_name: str):
        """
        Initialize the agent.

        Args:
            name (str): the name of the agent
            metadata (WiseAgentMetaData): the metadata for the agent
            transport (WiseAgentTransport): the transport to use for communication
            destination_agent_name (str): the name of the agent to send requests to
        """
        self._name = name
        self._destination_agent_name = destination_agent_name
        # No LLM is needed: this agent only relays messages between the UI and the destination agent.
        super().__init__(name=name, metadata=metadata, transport=transport, llm=None)

    def __repr__(self):
        """Return a string representation of the agent."""
        return f"{self.__class__.__name__}(name={self.name}, \
            metadata={self.metadata}, transport={self.transport}, \
            destination_agent_name={self.destination_agent_name},\
            response_delivery={self.response_delivery}"

    def start_agent(self):
        """Start the agent: register a CHAT collaboration context and launch the web UI.

        The Gradio interface is launched with prevent_thread_lock=True so this call
        does not block the calling thread.
        """
        super().start_agent()
        # Unique context name per start, of the form "<agent-name>.<uuid4>".
        self._ctx = f'{self.name}.{str(uuid.uuid4())}'
        WiseAgentRegistry.create_context(self._ctx).set_collaboration_type(WiseAgentCollaborationType.CHAT)
        gradio.ChatInterface(self.slow_echo).launch(prevent_thread_lock=True)

    def stop_agent(self):
        """Stop the agent and remove the chat context created in start_agent."""
        super().stop_agent()
        WiseAgentRegistry.remove_context(self._ctx)

    def slow_echo(self, message, history):
            """Gradio chat callback: forward the user's message and block until a response arrives.

            Args:
                message: the text entered by the user in the chat UI
                history: the chat history supplied by Gradio (unused here)

            Returns:
                str: the message text of the response delivered via process_response

            NOTE(review): _cond.wait() has no timeout, so this blocks the Gradio worker
            indefinitely if no response is ever delivered — confirm this is intended.
            """
            with self._cond:
                self.handle_request(WiseAgentMessage(message=message, sender=self.name, context_name=self._ctx))
                self._cond.wait()
                return self._response.message

    def process_request(self, request: WiseAgentMessage,
                        conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
        """
        Process a request message by just passing it to another agent.

        Args:
            request (WiseAgentMessage): the request message to process
            conversation_history (List[ChatCompletionMessageParam]): The conversation history that
            can be used while processing the request. If this agent isn't involved in a type of
            collaboration that makes use of the conversation history, this will be an empty list.

        Returns:
            Optional[str]: the response to the request message as a string or None if there is
            no string response yet
        """
        logging.getLogger(self.name).info(f"AssistantAgent: process_request: {request}")
        # Record the user's message in the shared chat context before forwarding the request.
        WiseAgentRegistry.get_context(request.context_name).append_chat_completion({"role": "user", "content": request.message})
        self.send_request(request, self.destination_agent_name)
        # The answer arrives asynchronously via process_response, so there is no response yet.
        return None

    def process_response(self, response : WiseAgentMessage):
        """Store the response and wake the thread blocked in slow_echo."""
        logging.getLogger(self.name).info(f"AssistantAgent: process_response: {response}")
        with self._cond:
            self._response = response
            self._cond.notify()
        return True

    def process_event(self, event):
        """Do nothing"""
        return True

    def process_error(self, error):
        """Do nothing"""
        return True

    def stop(self):
        """Do nothing"""
        pass

    @property
    def name(self) -> str:
        """Get the name of the agent."""
        return self._name

    @property
    def destination_agent_name(self) -> str:
        """Get the name of the agent to send requests to."""
        return self._destination_agent_name

    @property
    def response_delivery(self) -> Optional[Callable[[], WiseAgentMessage]]:
        """Get the function used to deliver the response to the client.

        Returns:
            Optional[Callable[[], WiseAgentMessage]]: the delivery function, or None if not set
        """
        return self._response_delivery

    def set_response_delivery(self, response_delivery: Callable[[], WiseAgentMessage]):
        """
        Set the function to deliver the response to the client.

        Args:
            response_delivery (Callable[[], WiseAgentMessage]): the function to deliver the response to the client
        """
        self._response_delivery = response_delivery

destination_agent_name: str property

Get the name of the agent to send requests to.

name: str property

Get the name of the agent.

response_delivery: Optional[Callable[[], WiseAgentMessage]] property

Get the function used to deliver the response to the client. Returns (Callable[[], WiseAgentMessage]): the delivery function, or None if not set.

__init__(name, metadata, transport, destination_agent_name)

Initialize the agent.

Parameters:
  • name (str) –

    the name of the agent

  • metadata (WiseAgentMetaData) –

    the metadata for the agent

  • transport (WiseAgentTransport) –

    the transport to use for communication

  • destination_agent_name (str) –

    the name of the agent to send requests to

Source code in wiseagents/agents/assistant.py
31
32
33
34
35
36
37
38
39
40
41
42
43
44
def __init__(self, name: str, metadata: WiseAgentMetaData , transport: WiseAgentTransport,
             destination_agent_name: str):
    """
    Initialize the agent.

    Args:
        name (str): the name of the agent
        metadata (WiseAgentMetaData): the metadata for the agent
        transport (WiseAgentTransport): the transport to use for communication
        destination_agent_name (str): the name of the agent to send requests to
    """
    self._name = name
    self._destination_agent_name = destination_agent_name
    super().__init__(name=name, metadata=metadata, transport=transport, llm=None)

__new__(*args, **kwargs)

Create a new instance of the class, setting default values for the optional instance variables.

Source code in wiseagents/agents/assistant.py
26
27
28
29
def __new__(cls, *args, **kwargs):
    """Create a new instance of the class, setting default values for the optional instance variables."""
    obj = super().__new__(cls)
    return obj

__repr__()

Return a string representation of the agent.

Source code in wiseagents/agents/assistant.py
46
47
48
49
50
51
def __repr__(self):
    """Return a string representation of the agent."""
    return f"{self.__class__.__name__}(name={self.name}, \
        metadata={self.metadata}, transport={self.transport}, \
        destination_agent_name={self.destination_agent_name},\
        response_delivery={self.response_delivery}"

process_error(error)

Do nothing

Source code in wiseagents/agents/assistant.py
101
102
103
def process_error(self, error):
    """Do nothing"""
    return True

process_event(event)

Do nothing

Source code in wiseagents/agents/assistant.py
97
98
99
def process_event(self, event):
    """Do nothing"""
    return True

process_request(request, conversation_history)

Process a request message by just passing it to another agent.

Parameters:
  • request (WiseAgentMessage) –

    the request message to process

  • conversation_history (List[ChatCompletionMessageParam]) –

    the conversation history that can be used while processing the request; an empty list if this agent is not involved in a type of collaboration that makes use of the conversation history
Returns:
  • Optional[str] –

    the response to the request message as a string, or None if there is no string response yet

Source code in wiseagents/agents/assistant.py
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
def process_request(self, request: WiseAgentMessage,
                    conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
    """
    Process a request message by just passing it to another agent.

    Args:
        request (WiseAgentMessage): the request message to process
        conversation_history (List[ChatCompletionMessageParam]): The conversation history that
        can be used while processing the request. If this agent isn't involved in a type of
        collaboration that makes use of the conversation history, this will be an empty list.

    Returns:
        Optional[str]: the response to the request message as a string or None if there is
        no string response yet
    """
    logging.getLogger(self.name).info(f"AssistantAgent: process_request: {request}")
    WiseAgentRegistry.get_context(request.context_name).append_chat_completion({"role": "user", "content": request.message})
    self.send_request(request, self.destination_agent_name)
    return None

process_response(response)

Process a response message just sending it back to the client.

Source code in wiseagents/agents/assistant.py
89
90
91
92
93
94
95
def process_response(self, response : WiseAgentMessage):
    """Process a response message just sending it back to the client."""
    logging.getLogger(self.name).info(f"AssistantAgent: process_response: {response}")
    with self._cond:
        self._response = response
        self._cond.notify()
    return True

set_response_delivery(response_delivery)

Set the function to deliver the response to the client.

Parameters:
  • response_delivery (Callable[[], WiseAgentMessage]) –

    the function to deliver the response to the client

Source code in wiseagents/agents/assistant.py
125
126
127
128
129
130
131
132
def set_response_delivery(self, response_delivery: Callable[[], WiseAgentMessage]):
    """
    Set the function to deliver the response to the client.

    Args:
        response_delivery (Callable[[], WiseAgentMessage]): the function to deliver the response to the client
    """
    self._response_delivery = response_delivery

stop()

Do nothing

Source code in wiseagents/agents/assistant.py
105
106
107
def stop(self):
    """Do nothing"""
    pass

BaseCoVeChallengerWiseAgent

Bases: WiseAgent

This abstract agent implementation is used to challenge the response from a RAG or Graph RAG agent using the Chain-of-Verification (CoVe) method (https://arxiv.org/pdf/2309.11495) to try to prevent hallucinations.

Source code in wiseagents/agents/rag_wise_agents.py
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
class BaseCoVeChallengerWiseAgent(WiseAgent):
    """
    This abstract agent implementation is used to challenge the response from a RAG or Graph RAG agent
    using the Chain-of-Verification (CoVe) method (https://arxiv.org/pdf/2309.11495) to try to prevent
    hallucinations.

    Subclasses must implement retrieve_documents to supply the context documents used to
    answer each verification question independently of the baseline response.
    """

    def __new__(cls, *args, **kwargs):
        """Create a new instance of the class, setting default values for the optional instance variables."""
        obj = super().__new__(cls)
        # Direct instantiation of this abstract base is rejected here.
        enforce_no_abstract_class_instances(cls, BaseCoVeChallengerWiseAgent)
        obj._k = DEFAULT_NUM_DOCUMENTS
        obj._num_verification_questions = DEFAULT_NUM_VERIFICATION_QUESTIONS
        obj._vector_db = None
        obj._collection_name = DEFAULT_COLLECTION_NAME
        obj._graph_db = None
        return obj

    def __init__(self, name: str, metadata: WiseAgentMetaData, llm: WiseAgentLLM, transport: WiseAgentTransport,
                 k: Optional[int] = DEFAULT_NUM_DOCUMENTS,
                 num_verification_questions: Optional[int] = DEFAULT_NUM_VERIFICATION_QUESTIONS,
                 vector_db: Optional[WiseAgentVectorDB] = None, collection_name: Optional[str] = DEFAULT_COLLECTION_NAME,
                 graph_db: Optional[WiseAgentGraphDB] = None):
        """
        Initialize the agent.

        Args:
            name (str): the name of the agent
            metadata (WiseAgentMetaData): the metadata for the agent
            llm (WiseAgentLLM): the LLM agent to use for processing requests
            transport (WiseAgentTransport): the transport to use for communication
            k (Optional[int]): the number of documents to retrieve from the vector database, defaults to 4
            num_verification_questions (Optional[int]): the number of verification questions to generate, defaults to 4
            vector_db (Optional[WiseAgentVectorDB]): the vector DB associated with the agent (to be used for challenging RAG results)
            collection_name (Optional[str]): the vector DB collection name associated with the agent, defaults to "wise-agent-collection"
            graph_db (Optional[WiseAgentGraphDB]): the graph DB associated with the agent (to be used for challenging Graph RAG results)
        """
        self._k = k
        self._num_verification_questions = num_verification_questions
        self._vector_db = vector_db
        llm_agent = llm
        # collection_name and graph_db are stored by the superclass constructor.
        super().__init__(name=name, metadata=metadata, transport=transport, llm=llm_agent,
                         vector_db=vector_db, collection_name=collection_name, graph_db=graph_db)

    def __repr__(self):
        """Return a string representation of the agent."""
        return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm},"
                f"k={self.k}, num_verification_questions={self._num_verification_questions},"
                f"transport={self.transport}, vector_db={self.vector_db}, collection_name={self.collection_name},"
                f"graph_db={self.graph_db})")

    def process_event(self, event):
        """Do nothing"""
        return True

    def process_error(self, error):
        """Log the error and return True."""
        logging.error(error)
        return True

    def process_request(self, request: WiseAgentMessage,
                        conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
        """
        Process a message containing a question and a baseline response to the question
        by challenging the baseline response to generate a revised response to the original question.

        Args:
            request (WiseAgentMessage): the request message to process
            conversation_history (List[ChatCompletionMessageParam]): The conversation history that
            can be used while processing the request. If this agent isn't involved in a type of
            collaboration that makes use of the conversation history, this will be an empty list.

        Returns:
            str: the response to the request message as a string
        """
        return self.create_and_process_chain_of_verification_prompts(request.message, conversation_history)

    def process_response(self, response: WiseAgentMessage):
        """Do nothing"""
        return True

    def stop(self):
        """Do nothing"""
        pass

    @property
    def name(self) -> str:
        """Get the name of the agent."""
        return self._name

    @property
    def k(self) -> int:
        """Get the number of documents to retrieve."""
        return self._k

    @property
    def num_verification_questions(self) -> int:
        """Get the number of verification questions to generate."""
        return self._num_verification_questions

    def create_and_process_chain_of_verification_prompts(self, message: str,
                                                         conversation_history: List[ChatCompletionMessageParam]) -> str:
        """
        Create prompts to challenge the baseline response to a question to try to generate a revised response
        to the original question.

        Args:
            message (str): the message containing the question and baseline response
            conversation_history (List[ChatCompletionMessageParam]): The conversation history that
            can be used while processing the request. If this agent isn't involved in a type of
            collaboration that makes use of the conversation history, this will be an empty list.

        Returns:
            str: the revised response produced by the final LLM call (expected to be JSON of the
            form {'revised': '...'} as requested by the final prompt)

        NOTE(review): conversation_history is mutated in place (prompts and system messages are
        appended to it), and the system message is appended again before the final prompt below,
        so it can appear twice in the history — confirm this duplication is intended.
        """

        # plan verifications, taking into account the baseline response and conversation history
        prompt = (f"Given the following question and baseline response, generate a list of {self.num_verification_questions} "
                  f" verification questions that could help determine if there are any mistakes in the baseline response:"
                  f"\n{message}\n"
                  f"Your response should contain only the list of questions, one per line.\n")
        if self.metadata.system_message or self.llm.system_message:
            conversation_history.append({"role": "system", "content": self.metadata.system_message or self.llm.system_message})
        conversation_history.append({"role": "user", "content": prompt})
        llm_response = self.llm.process_chat_completion(conversation_history, [])

        # execute verifications, answering questions independently, without the baseline response
        # (only the first num_verification_questions lines of the LLM output are used)
        verification_questions = llm_response.choices[0].message.content.splitlines()[:self.num_verification_questions]
        verification_responses = ""
        for question in verification_questions:
            retrieved_documents = self.retrieve_documents(question)
            llm_response = create_and_process_rag_prompt(retrieved_documents, question, self.llm, False,
                                          [], self.metadata.system_message)
            verification_responses = (verification_responses + "Verification Question: " + question + "\n"
                                      + "Verification Result: " + llm_response + "\n")

        # generate the final revised response, conditioned on the baseline response and verification results
        complete_info = message + "\n" + verification_responses
        prompt = (f"Given the following question, baseline response, and a list of verification questions and results,"
                  f" generate a revised response incorporating the verification results:\n{complete_info}\n"
                  f"Your response must contain only the revised response to the question in the JSON format shown below:\n"
                  f"{{'revised': 'Your revised response to the question.'}}\n")

        if self.metadata.system_message or self.llm.system_message:
            conversation_history.append({"role": "system", "content": self.metadata.system_message or self.llm.system_message})
        conversation_history.append({"role": "user", "content": prompt})
        llm_response = self.llm.process_chat_completion(conversation_history, [])
        return llm_response.choices[0].message.content

    @abstractmethod
    def retrieve_documents(self, question: str) -> List[Document]:
        """
        Retrieve documents to be used as the context for a RAG or Graph RAG prompt.

        Args:
            question (str): the question to be used to retrieve the documents

        Returns:
            List[Document]: the list of documents retrieved for the question
        """
        ...

k: int property

Get the number of documents to retrieve.

name: str property

Get the name of the agent.

num_verification_questions: int property

Get the number of verification questions to generate.

__init__(name, metadata, llm, transport, k=DEFAULT_NUM_DOCUMENTS, num_verification_questions=DEFAULT_NUM_VERIFICATION_QUESTIONS, vector_db=None, collection_name=DEFAULT_COLLECTION_NAME, graph_db=None)

Initialize the agent.

Parameters:
  • name (str) –

    the name of the agent

  • metadata (WiseAgentMetaData) –

    the metadata for the agent

  • llm (WiseAgentLLM) –

    the LLM agent to use for processing requests

  • transport (WiseAgentTransport) –

    the transport to use for communication

  • k (Optional[int], default: DEFAULT_NUM_DOCUMENTS) –

    the number of documents to retrieve from the vector database, defaults to 4

  • num_verification_questions (Optional[int], default: DEFAULT_NUM_VERIFICATION_QUESTIONS) –

    the number of verification questions to generate, defaults to 4

  • vector_db (Optional[WiseAgentVectorDB], default: None ) –

    the vector DB associated with the agent (to be used for challenging RAG results)

  • collection_name (Optional[str], default: DEFAULT_COLLECTION_NAME) –

    the vector DB collection name associated with the agent, defaults to "wise-agent-collection"

  • graph_db (Optional[WiseAgentGraphDB], default: None ) –

    the graph DB associated with the agent (to be used for challenging Graph RAG results)

Source code in wiseagents/agents/rag_wise_agents.py
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
def __init__(self, name: str, metadata: WiseAgentMetaData, llm: WiseAgentLLM, transport: WiseAgentTransport,
             k: Optional[int] = DEFAULT_NUM_DOCUMENTS,
             num_verification_questions: Optional[int] = DEFAULT_NUM_VERIFICATION_QUESTIONS,
             vector_db: Optional[WiseAgentVectorDB] = None, collection_name: Optional[str] = DEFAULT_COLLECTION_NAME,
             graph_db: Optional[WiseAgentGraphDB] = None):
    """
    Initialize the agent.

    Args:
        name (str): the name of the agent
        metadata (WiseAgentMetaData): the metadata for the agent
        llm (WiseAgentLLM): the LLM agent to use for processing requests
        transport (WiseAgentTransport): the transport to use for communication
        k Optional(int): the number of documents to retrieve from the vector database, defaults to 4
        num_verification_questions Optional(int): the number of verification questions to generate, defaults to 4
        vector_db (Optional[WiseAgentVectorDB]): the vector DB associated with the agent (to be used for challenging RAG results)
        collection_name (Optional[str]) = "wise-agent-collection": the vector DB collection name associated with the agent
        graph_db (Optional[WiseAgentGraphDB]): the graph DB associated with the agent (to be used for challenging Graph RAG results)
    """
    self._k = k
    self._num_verification_questions = num_verification_questions
    self._vector_db = vector_db
    llm_agent = llm
    super().__init__(name=name, metadata=metadata, transport=transport, llm=llm_agent,
                     vector_db=vector_db, collection_name=collection_name, graph_db=graph_db)

__new__(*args, **kwargs)

Create a new instance of the class, setting default values for the optional instance variables.

Source code in wiseagents/agents/rag_wise_agents.py
253
254
255
256
257
258
259
260
261
262
def __new__(cls, *args, **kwargs):
    """Create a new instance of the class, setting default values for the optional instance variables."""
    obj = super().__new__(cls)
    enforce_no_abstract_class_instances(cls, BaseCoVeChallengerWiseAgent)
    obj._k = DEFAULT_NUM_DOCUMENTS
    obj._num_verification_questions = DEFAULT_NUM_VERIFICATION_QUESTIONS
    obj._vector_db = None
    obj._collection_name = DEFAULT_COLLECTION_NAME
    obj._graph_db = None
    return obj

__repr__()

Return a string representation of the agent.

Source code in wiseagents/agents/rag_wise_agents.py
290
291
292
293
294
295
def __repr__(self):
    """Return a string representation of the agent."""
    return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm},"
            f"k={self.k}, num_verification_questions={self._num_verification_questions},"
            f"transport={self.transport}, vector_db={self.vector_db}, collection_name={self.collection_name},"
            f"graph_db={self.graph_db})")

create_and_process_chain_of_verification_prompts(message, conversation_history)

Create prompts to challenge the baseline response to a question to try to generate a revised response to the original question.

Parameters:
  • message (str) –

    the message containing the question and baseline response

  • conversation_history (List[ChatCompletionMessageParam]) –

    the conversation history that can be used while processing the request; an empty list if this agent is not involved in a type of collaboration that makes use of the conversation history
Source code in wiseagents/agents/rag_wise_agents.py
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
def create_and_process_chain_of_verification_prompts(self, message: str,
                                                     conversation_history: List[ChatCompletionMessageParam]) -> str:
    """
    Create prompts to challenge the baseline response to a question to try to generate a revised response
    to the original question.

    Args:
        message (str): the message containing the question and baseline response
        conversation_history (List[ChatCompletionMessageParam]): The conversation history that
        can be used while processing the request. If this agent isn't involved in a type of
        collaboration that makes use of the conversation history, this will be an empty list.
    """

    # plan verifications, taking into account the baseline response and conversation history
    prompt = (f"Given the following question and baseline response, generate a list of {self.num_verification_questions} "
              f" verification questions that could help determine if there are any mistakes in the baseline response:"
              f"\n{message}\n"
              f"Your response should contain only the list of questions, one per line.\n")
    if self.metadata.system_message or self.llm.system_message:
        conversation_history.append({"role": "system", "content": self.metadata.system_message or self.llm.system_message})
    conversation_history.append({"role": "user", "content": prompt})
    llm_response = self.llm.process_chat_completion(conversation_history, [])

    # execute verifications, answering questions independently, without the baseline response
    verification_questions = llm_response.choices[0].message.content.splitlines()[:self.num_verification_questions]
    verification_responses = ""
    for question in verification_questions:
        retrieved_documents = self.retrieve_documents(question)
        llm_response = create_and_process_rag_prompt(retrieved_documents, question, self.llm, False,
                                      [], self.metadata.system_message)
        verification_responses = (verification_responses + "Verification Question: " + question + "\n"
                                  + "Verification Result: " + llm_response + "\n")

    # generate the final revised response, conditioned on the baseline response and verification results
    complete_info = message + "\n" + verification_responses
    prompt = (f"Given the following question, baseline response, and a list of verification questions and results,"
              f" generate a revised response incorporating the verification results:\n{complete_info}\n"
              f"Your response must contain only the revised response to the question in the JSON format shown below:\n"
              f"{{'revised': 'Your revised response to the question.'}}\n")

    if self.metadata.system_message or self.llm.system_message:
        conversation_history.append({"role": "system", "content": self.metadata.system_message or self.llm.system_message})
    conversation_history.append({"role": "user", "content": prompt})
    llm_response = self.llm.process_chat_completion(conversation_history, [])
    return llm_response.choices[0].message.content

process_error(error)

Log the error and return True.

Source code in wiseagents/agents/rag_wise_agents.py
301
302
303
304
def process_error(self, error):
    """Log the error and return True."""
    logging.error(error)
    return True

process_event(event)

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
297
298
299
def process_event(self, event):
    """Do nothing"""
    return True

process_request(request, conversation_history)

Process a message containing a question and a baseline response to the question by challenging the baseline response to generate a revised response to the original question.

Parameters:
  • request (WiseAgentMessage) –

    the request message to process

  • conversation_history (List[ChatCompletionMessageParam]) –

    the conversation history that can be used while processing the request; an empty list if this agent is not involved in a type of collaboration that makes use of the conversation history
Returns:
  • str (Optional[str]) –

    the response to the request message as a string
Source code in wiseagents/agents/rag_wise_agents.py
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
def process_request(self, request: WiseAgentMessage,
                    conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
    """
    Process a message containing a question and a baseline response to the question
    by challenging the baseline response to generate a revised response to the original question.

    Args:
        request (WiseAgentMessage): the request message to process
        conversation_history (List[ChatCompletionMessageParam]): The conversation history that
        can be used while processing the request. If this agent isn't involved in a type of
        collaboration that makes use of the conversation history, this will be an empty list.

    Returns:
        str: the response to the request message as a string
    """
    return self.create_and_process_chain_of_verification_prompts(request.message, conversation_history)

process_response(response)

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
323
324
325
def process_response(self, response: WiseAgentMessage):
    """Do nothing"""
    return True

retrieve_documents(question) abstractmethod

Retrieve documents to be used as the context for a RAG or Graph RAG prompt.

Parameters:
  • question (str) –

    the question to be used to retrieve the documents

Returns:
  • List[Document]

    List[Document]: the list of documents retrieved for the question

Source code in wiseagents/agents/rag_wise_agents.py
392
393
394
395
396
397
398
399
400
401
402
403
@abstractmethod
def retrieve_documents(self, question: str) -> List[Document]:
    """
    Retrieve documents to be used as the context for a RAG or Graph RAG prompt.

    Args:
        question (str): the question to be used to retrieve the documents

    Returns:
        List[Document]: the list of documents retrieved for the question
    """
    ...

stop()

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
327
328
329
def stop(self):
    """No resources to release; intentionally a no-op."""

CoVeChallengerRAGWiseAgent

Bases: BaseCoVeChallengerWiseAgent

This agent implementation is used to challenge the response from a RAG agent using the Chain-of-Verification (CoVe) method (https://arxiv.org/pdf/2309.11495) to try to prevent hallucinations.

Source code in wiseagents/agents/rag_wise_agents.py
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
class CoVeChallengerRAGWiseAgent(BaseCoVeChallengerWiseAgent):
    """
    Challenges the response produced by a RAG agent using the
    Chain-of-Verification (CoVe) method (https://arxiv.org/pdf/2309.11495)
    in an attempt to prevent hallucinations.
    """
    yaml_tag = u'!wiseagents.agents.CoVeChallengerRAGWiseAgent'

    def __new__(cls, *args, **kwargs):
        """Create a new instance of the class, setting default values for the optional instance variables."""
        instance = super().__new__(cls)
        # Defaults are assigned here so the attributes exist even when __init__ is
        # bypassed (presumably during YAML-based construction via yaml_tag — confirm).
        instance._num_verification_questions = DEFAULT_NUM_VERIFICATION_QUESTIONS
        instance._k = DEFAULT_NUM_DOCUMENTS
        instance._collection_name = DEFAULT_COLLECTION_NAME
        return instance

    def __init__(self, name: str, metadata: WiseAgentMetaData, llm: WiseAgentLLM, vector_db: WiseAgentVectorDB,
                 transport: WiseAgentTransport, collection_name: Optional[str] = DEFAULT_COLLECTION_NAME,
                 k: Optional[int] = DEFAULT_NUM_DOCUMENTS,
                 num_verification_questions: Optional[int] = DEFAULT_NUM_VERIFICATION_QUESTIONS):
        """
        Initialize the agent.

        Args:
            name (str): the name of the agent
            metadata (WiseAgentMetaData): the metadata for the agent
            llm (WiseAgentLLM): the LLM agent to use for processing requests
            vector_db (WiseAgentVectorDB): the vector database to use for retrieving documents
            transport (WiseAgentTransport): the transport to use for communication
            collection_name (Optional[str]): the name of the collection to use in the vector database,
            defaults to wise-agents-collection
            k (Optional[int]): the number of documents to retrieve from the vector database, defaults to 4
            num_verification_questions (Optional[int]): the number of verification questions to generate,
            defaults to 4
        """
        self._num_verification_questions = num_verification_questions
        self._k = k
        super().__init__(name=name, metadata=metadata, transport=transport, llm=llm,
                         vector_db=vector_db, collection_name=collection_name,
                         k=k, num_verification_questions=num_verification_questions)

    def __repr__(self):
        """Return a string representation of the agent."""
        return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm},"
                f"vector_db={self.vector_db}, collection_name={self.collection_name}, k={self.k},"
                f"num_verification_questions={self._num_verification_questions}, transport={self.transport})")

    def process_event(self, event):
        """Events are ignored by this agent."""
        return True

    def process_error(self, error):
        """Record the error in the log and report it as handled."""
        logging.error(error)
        return True

    def process_request(self, request: WiseAgentMessage, conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
        """
        Process a message containing a question and a baseline response to the question
        by challenging the baseline response to generate a revised response to the
        original question.

        Args:
            request (WiseAgentMessage): the request message to process
            conversation_history (List[ChatCompletionMessageParam]): the conversation history
            available while processing the request; empty when this agent is not part of a
            collaboration that tracks history

        Returns:
            str: the response to the request message as a string
        """
        logging.getLogger(self.name).info(f"Received a message from {request.sender}. Starting to challenge it")
        return self.create_and_process_chain_of_verification_prompts(request.message, conversation_history)

    def process_response(self, response: WiseAgentMessage):
        """Responses are ignored by this agent."""
        return True

    def stop(self):
        """No resources to release; intentionally a no-op."""
        pass

    def retrieve_documents(self, question: str) -> List[Document]:
        """Retrieve documents for the question from the configured vector database collection."""
        return retrieve_documents_for_rag(question, self.vector_db, self.collection_name, self.k)

__init__(name, metadata, llm, vector_db, transport, collection_name=DEFAULT_COLLECTION_NAME, k=DEFAULT_NUM_DOCUMENTS, num_verification_questions=DEFAULT_NUM_VERIFICATION_QUESTIONS)

Initialize the agent.

Parameters:
  • name (str) –

    the name of the agent

  • metadata (WiseAgentMetaData) –

    the metadata for the agent

  • llm (WiseAgentLLM) –

    the LLM agent to use for processing requests

  • vector_db (WiseAgentVectorDB) –

    the vector database to use for retrieving documents

  • transport (WiseAgentTransport) –

    the transport to use for communication

  • collection_name (Optional[str], default: DEFAULT_COLLECTION_NAME ) –

    the name of the collection to use in the vector database, defaults to wise-agents-collection

  • k (Optional[int], default: DEFAULT_NUM_DOCUMENTS ) –

    the number of documents to retrieve from the vector database, defaults to 4

  • num_verification_questions (Optional[int], default: DEFAULT_NUM_VERIFICATION_QUESTIONS ) –

    the number of verification questions to generate, defaults to 4

Source code in wiseagents/agents/rag_wise_agents.py
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
def __init__(self, name: str, metadata: WiseAgentMetaData, llm: WiseAgentLLM, vector_db: WiseAgentVectorDB,
             transport: WiseAgentTransport, collection_name: Optional[str] = DEFAULT_COLLECTION_NAME,
             k: Optional[int] = DEFAULT_NUM_DOCUMENTS,
             num_verification_questions: Optional[int] = DEFAULT_NUM_VERIFICATION_QUESTIONS):
    """
    Initialize the agent.

    Args:
        name (str): the name of the agent
        metadata (WiseAgentMetaData): the metadata for the agent
        llm (WiseAgentLLM): the LLM agent to use for processing requests
        vector_db (WiseAgentVectorDB): the vector database to use for retrieving documents
        transport (WiseAgentTransport): the transport to use for communication
        collection_name (Optional[str]): the name of the collection to use in the vector database,
        defaults to wise-agents-collection
        k (Optional[int]): the number of documents to retrieve from the vector database, defaults to 4
        num_verification_questions (Optional[int]): the number of verification questions to generate,
        defaults to 4
    """
    self._num_verification_questions = num_verification_questions
    self._k = k
    # The base initializer receives the same values, so these assignments are
    # made first only to guarantee the attributes exist during base-class setup.
    super().__init__(name=name, metadata=metadata, transport=transport, llm=llm,
                     vector_db=vector_db, collection_name=collection_name,
                     k=k, num_verification_questions=num_verification_questions)

__new__(*args, **kwargs)

Create a new instance of the class, setting default values for the optional instance variables.

Source code in wiseagents/agents/rag_wise_agents.py
414
415
416
417
418
419
420
def __new__(cls, *args, **kwargs):
    """Create a new instance of the class, setting default values for the optional instance variables."""
    instance = super().__new__(cls)
    # Defaults are assigned here so the attributes exist even when __init__ is
    # bypassed (presumably during YAML-based construction — confirm).
    instance._num_verification_questions = DEFAULT_NUM_VERIFICATION_QUESTIONS
    instance._k = DEFAULT_NUM_DOCUMENTS
    instance._collection_name = DEFAULT_COLLECTION_NAME
    return instance

__repr__()

Return a string representation of the agent.

Source code in wiseagents/agents/rag_wise_agents.py
445
446
447
448
449
def __repr__(self):
    """Return a string representation of the agent."""
    # Combines the public properties plus the private _num_verification_questions
    # attribute into one line; adjacent f-string literals are concatenated with no
    # separator, so some fields are deliberately joined without a space.
    return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm},"
            f"vector_db={self.vector_db}, collection_name={self.collection_name}, k={self.k},"
            f"num_verification_questions={self._num_verification_questions}, transport={self.transport})")

process_error(error)

Log the error and return True.

Source code in wiseagents/agents/rag_wise_agents.py
455
456
457
458
def process_error(self, error):
    """Record the error in the log and report it as handled."""
    logging.error(error)
    return True

process_event(event)

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
451
452
453
def process_event(self, event):
    """Events are ignored by this agent."""
    return True

process_request(request, conversation_history)

Process a message containing a question and a baseline response to the question by challenging the baseline response to generate a revised response to the original question.

Parameters:
  • request (WiseAgentMessage) –

    the request message to process

  • conversation_history (List[ChatCompletionMessageParam]) –

    The conversation history that can be used while processing the request; this list is empty when the agent is not involved in a collaboration that tracks history.

Returns:
  • str( Optional[str] ) –

    the response to the request message as a string

Source code in wiseagents/agents/rag_wise_agents.py
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
def process_request(self, request: WiseAgentMessage, conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
    """
    Challenge a baseline answer: the request message carries both the question and
    the baseline response, and the reply is a revised response produced via
    chain-of-verification prompting.

    Args:
        request (WiseAgentMessage): the request message to process
        conversation_history (List[ChatCompletionMessageParam]): the conversation history
        available while processing the request; empty when this agent is not part of a
        collaboration that tracks history

    Returns:
        str: the response to the request message as a string
    """
    logging.getLogger(self.name).info(f"Received a message from {request.sender}. Starting to challenge it")
    return self.create_and_process_chain_of_verification_prompts(request.message, conversation_history)

process_response(response)

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
478
479
480
def process_response(self, response: WiseAgentMessage):
    """Responses are ignored by this agent."""
    return True

stop()

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
482
483
484
def stop(self):
    """No resources to release; intentionally a no-op."""

GraphRAGWiseAgent

Bases: WiseAgent

This agent implementation makes use of Graph Retrieval Augmented Generation (Graph RAG) to answer questions.

Source code in wiseagents/agents/rag_wise_agents.py
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
class GraphRAGWiseAgent(WiseAgent):
    """
    This agent implementation makes use of Graph Retrieval Augmented Generation (Graph RAG)
    to answer questions.
    """
    yaml_tag = u'!wiseagents.agents.GraphRAGWiseAgent'

    def __new__(cls, *args, **kwargs):
        """Create a new instance of the class, setting default values for the optional instance variables."""
        obj = super().__new__(cls)
        # Defaults are assigned here so the attributes exist even when __init__ is
        # bypassed (presumably during YAML-based construction via yaml_tag — confirm).
        obj._k = DEFAULT_NUM_DOCUMENTS
        obj._include_sources = DEFAULT_INCLUDE_SOURCES
        obj._retrieval_query = ""
        obj._params = None
        obj._metadata_filter = None
        return obj

    def __init__(self, name: str, metadata: WiseAgentMetaData, llm: WiseAgentLLM, graph_db: WiseAgentGraphDB,
                 transport: WiseAgentTransport, k: Optional[int] = DEFAULT_NUM_DOCUMENTS,
                 include_sources: Optional[bool] = DEFAULT_INCLUDE_SOURCES,
                 retrieval_query: Optional[str] = "", params: Optional[Dict[str, Any]] = None,
                 metadata_filter: Optional[Dict[str, Any]] = None):
        """
        Initialize the agent.

        Args:
            name (str): the name of the agent
            metadata (WiseAgentMetaData): the metadata for the agent
            llm (WiseAgentLLM): the LLM to use for processing requests
            graph_db (WiseAgentGraphDB): the graph database to use for retrieving documents
            transport (WiseAgentTransport): the transport to use for communication
            k (Optional[int]): the number of documents to retrieve for each query, defaults to 4
            include_sources (Optional[bool]): whether to include the sources of the documents that were
            consulted to produce the response, defaults to False
            retrieval_query (Optional[str]): the optional retrieval query to use to obtain sub-graphs
            connected to nodes retrieved from a similarity search
            params (Optional[Dict[str, Any]]): the optional parameters for the query
            metadata_filter (Optional[Dict[str, Any]]): the optional metadata filter to use with similarity search
        """
        self._k = k
        self._include_sources = include_sources
        self._retrieval_query = retrieval_query
        self._params = params
        self._metadata_filter = metadata_filter
        # Bug fix: forward the 'transport' parameter directly. The previous code passed
        # self.transport, which is only set by the base class initializer and is
        # therefore not yet available at this point.
        super().__init__(name=name, metadata=metadata, transport=transport, llm=llm,
                         graph_db=graph_db)

    def __repr__(self):
        """Return a string representation of the agent."""
        # Bug fix: removed a stray ')' that previously appeared right after the
        # include_sources field and prematurely closed the representation.
        return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm},"
                f"graph_db={self.graph_db}, transport={self.transport}, k={self.k},"
                f"include_sources={self.include_sources}, retrieval_query={self.retrieval_query},"
                f"params={self.params}, metadata_filter={self.metadata_filter})")

    def process_event(self, event):
        """Do nothing"""
        return True

    def process_error(self, error):
        """Log the error and return True."""
        logging.error(error)
        return True

    def process_request(self, request: WiseAgentMessage, conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
        """
        Process a request message by passing it to the RAG agent and sending the response back to the client.

        Args:
            request (WiseAgentMessage): the request message to process
            conversation_history (List[ChatCompletionMessageParam]): The conversation history that
            can be used while processing the request. If this agent isn't involved in a type of
            collaboration that makes use of the conversation history, this will be an empty list.

        Returns:
            Optional[str]: the response to the request message as a string or None if there is
            no string response yet
        """
        logging.getLogger(self.name).info(f"Received a message from {request.sender}. Starting to process it using Graph RAG")
        # Retrieve the relevant sub-graph documents first, then build the RAG prompt from them.
        retrieved_documents = retrieve_documents_for_graph_rag(request.message, self.graph_db, self.k,
                                                               self.retrieval_query, self.params, self.metadata_filter)
        llm_response_with_sources = create_and_process_rag_prompt(retrieved_documents, request.message, self.llm, self.include_sources,
                                                                  conversation_history, self.metadata.system_message)
        return llm_response_with_sources

    def process_response(self, response: WiseAgentMessage):
        """Do nothing"""
        return True

    def stop(self):
        """Do nothing"""
        pass

    @property
    def name(self) -> str:
        """Get the name of the agent."""
        return self._name

    @property
    def k(self) -> int:
        """Get the number of documents to retrieve for each query."""
        return self._k

    @property
    def include_sources(self) -> bool:
        """Get whether to include the sources of the documents that were consulted to produce the response."""
        return self._include_sources

    @property
    def retrieval_query(self) -> str:
        """Get the Cypher query to use to obtain sub-graphs connected to nodes retrieved from a similarity search."""
        return self._retrieval_query

    @property
    def params(self) -> Optional[Dict[str, Any]]:
        """Get the optional parameters for the query."""
        return self._params

    @property
    def metadata_filter(self) -> Optional[Dict[str, Any]]:
        """Get the optional metadata filter to use with similarity search."""
        return self._metadata_filter

include_sources: bool property

Get whether to include the sources of the documents that were consulted to produce the response.

k: int property

Get the number of documents to retrieve for each query.

metadata_filter: Optional[Dict[str, Any]] property

Get the optional metadata filter to use with similarity search.

name: str property

Get the name of the agent.

params: Optional[Dict[str, Any]] property

Get the optional parameters for the query.

retrieval_query: str property

Get the Cypher query to use to obtain sub-graphs connected to nodes retrieved from a similarity search.

__init__(name, metadata, llm, graph_db, transport, k=DEFAULT_NUM_DOCUMENTS, include_sources=DEFAULT_INCLUDE_SOURCES, retrieval_query='', params=None, metadata_filter=None)

Initialize the agent.

Parameters:
  • name (str) –

    the name of the agent

  • metadata (WiseAgentMetaData) –

    the metadata for the agent

  • llm (WiseAgentLLM) –

    the LLM to use for processing requests

  • graph_db (WiseAgentGraphDB) –

    the graph database to use for retrieving documents

  • transport (WiseAgentTransport) –

    the transport to use for communication

  • k (Optional[int], default: DEFAULT_NUM_DOCUMENTS ) –

    the number of documents to retrieve for each query, defaults to 4

  • include_sources (Optional[bool], default: DEFAULT_INCLUDE_SOURCES ) –

    whether to include the sources of the documents that were consulted to

  • retrieval_query (Optional[str], default: '' ) –

    the optional retrieval query to use to obtain sub-graphs connected to nodes

  • params (Optional[Dict[str, Any]], default: None ) –

    the optional parameters for the query

  • metadata_filter (Optional[Dict[str, Any]], default: None ) –

    the optional metadata filter to use with similarity search

Source code in wiseagents/agents/rag_wise_agents.py
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
def __init__(self, name: str, metadata: WiseAgentMetaData, llm: WiseAgentLLM, graph_db: WiseAgentGraphDB,
             transport: WiseAgentTransport, k: Optional[int] = DEFAULT_NUM_DOCUMENTS,
             include_sources: Optional[bool] = DEFAULT_INCLUDE_SOURCES,
             retrieval_query: Optional[str] = "", params: Optional[Dict[str, Any]] = None,
             metadata_filter: Optional[Dict[str, Any]] = None):
    """
    Initialize the agent.

    Args:
        name (str): the name of the agent
        metadata (WiseAgentMetaData): the metadata for the agent
        llm (WiseAgentLLM): the LLM to use for processing requests
        graph_db (WiseAgentGraphDB): the graph database to use for retrieving documents
        transport (WiseAgentTransport): the transport to use for communication
        k (Optional[int]): the number of documents to retrieve for each query, defaults to 4
        include_sources (Optional[bool]): whether to include the sources of the documents that were
        consulted to produce the response, defaults to False
        retrieval_query (Optional[str]): the optional retrieval query to use to obtain sub-graphs
        connected to nodes retrieved from a similarity search
        params (Optional[Dict[str, Any]]): the optional parameters for the query
        metadata_filter (Optional[Dict[str, Any]]): the optional metadata filter to use with similarity search
    """
    self._k = k
    self._include_sources = include_sources
    self._retrieval_query = retrieval_query
    self._params = params
    self._metadata_filter = metadata_filter
    # Bug fix: forward the 'transport' parameter directly. The previous code passed
    # self.transport, which is only set by the base class initializer and is
    # therefore not yet available at this point.
    super().__init__(name=name, metadata=metadata, transport=transport, llm=llm,
                     graph_db=graph_db)

__new__(*args, **kwargs)

Create a new instance of the class, setting default values for the optional instance variables.

Source code in wiseagents/agents/rag_wise_agents.py
130
131
132
133
134
135
136
137
138
def __new__(cls, *args, **kwargs):
    """Create a new instance of the class, setting default values for the optional instance variables."""
    instance = super().__new__(cls)
    # Defaults are assigned here so the attributes exist even when __init__ is
    # bypassed (presumably during YAML-based construction — confirm).
    instance._metadata_filter = None
    instance._params = None
    instance._retrieval_query = ""
    instance._include_sources = DEFAULT_INCLUDE_SOURCES
    instance._k = DEFAULT_NUM_DOCUMENTS
    return instance

__repr__()

Return a string representation of the agent.

Source code in wiseagents/agents/rag_wise_agents.py
170
171
172
173
174
175
def __repr__(self):
    """Return a string representation of the agent."""
    # Bug fix: removed a stray ')' that previously appeared right after the
    # include_sources field and prematurely closed the representation.
    return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm},"
            f"graph_db={self.graph_db}, transport={self.transport}, k={self.k},"
            f"include_sources={self.include_sources}, retrieval_query={self.retrieval_query},"
            f"params={self.params}, metadata_filter={self.metadata_filter})")

process_error(error)

Log the error and return True.

Source code in wiseagents/agents/rag_wise_agents.py
181
182
183
184
def process_error(self, error):
    """Record the error in the log and report it as handled."""
    logging.error(error)
    return True

process_event(event)

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
177
178
179
def process_event(self, event):
    """Events are ignored by this agent."""
    return True

process_request(request, conversation_history)

Process a request message by passing it to the RAG agent and sending the response back to the client.

Parameters:
  • request (WiseAgentMessage) –

    the request message to process

  • conversation_history (List[ChatCompletionMessageParam]) –

    The conversation history that can be used while processing the request; this list is empty when the agent is not involved in a collaboration that tracks history.

Returns:
  • Optional[str]

    Optional[str]: the response to the request message as a string or None if there is

  • Optional[str]

    no string response yet

Source code in wiseagents/agents/rag_wise_agents.py
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
def process_request(self, request: WiseAgentMessage, conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
    """
    Answer the request using Graph RAG: retrieve relevant documents from the graph
    database and build a RAG prompt from them for the LLM.

    Args:
        request (WiseAgentMessage): the request message to process
        conversation_history (List[ChatCompletionMessageParam]): the conversation history
        available while processing the request; empty when this agent is not part of a
        collaboration that tracks history

    Returns:
        Optional[str]: the response to the request message as a string or None if there is
        no string response yet
    """
    logging.getLogger(self.name).info(f"Received a message from {request.sender}. Starting to process it using Graph RAG")
    documents = retrieve_documents_for_graph_rag(request.message, self.graph_db, self.k,
                                                 self.retrieval_query, self.params, self.metadata_filter)
    answer = create_and_process_rag_prompt(documents, request.message, self.llm, self.include_sources,
                                           conversation_history, self.metadata.system_message)
    return answer

process_response(response)

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
207
208
209
def process_response(self, response: WiseAgentMessage):
    """Responses are ignored by this agent."""
    return True

stop()

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
211
212
213
def stop(self):
    """No resources to release; intentionally a no-op."""

LLMOnlyWiseAgent

Bases: WiseAgent

This utility agent simply passes a request that it receives to an LLM for processing and returns the response received from the LLM.

Source code in wiseagents/agents/utility_wise_agents.py
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
class LLMOnlyWiseAgent(WiseAgent):
    """
    Utility agent that simply forwards each incoming request to an LLM and
    returns whatever answer the LLM produces.
    """
    yaml_tag = u'!wiseagents.agents.LLMOnlyWiseAgent'

    def __new__(cls, *args, **kwargs):
        """Create a new instance of the class, setting default values for the instance variables."""
        # No optional instance variables need defaults for this agent type.
        return super().__new__(cls)

    def __init__(self, name: str, metadata: WiseAgentMetaData, llm : WiseAgentLLM, transport: WiseAgentTransport):
        """
        Initialize the agent by delegating to the base class initializer.

        Args:
            name (str): the name of the agent
            metadata (WiseAgentMetaData): the metadata for the agent
            llm (WiseAgentLLM): the LLM agent to use for processing requests
            transport (WiseAgentTransport): the transport to use for communication
        """
        super().__init__(name=name, metadata=metadata, transport=transport, llm=llm)

    def __repr__(self):
        """Return a string representation of the agent."""
        return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm}, transport={self.transport})")

    def process_event(self, event):
        """Events are ignored by this agent."""
        return True

    def process_error(self, error):
        """Record the error in the log and report it as handled."""
        logging.error(error)
        return True

    def process_request(self, request: WiseAgentMessage, conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
        """
        Answer the request by appending it (plus any configured system message) to
        the conversation history and asking the LLM for a chat completion.

        Args:
            request (WiseAgentMessage): the request message to process
            conversation_history (List[ChatCompletionMessageParam]): the conversation history
            available while processing the request; empty when this agent is not part of a
            collaboration that tracks history

        Returns:
            Optional[str]: the response to the request message as a string or None if there is
            no string response yet
        """
        # The agent-level system message takes precedence over the LLM's own.
        system_message = self.metadata.system_message or self.llm.system_message
        if system_message:
            conversation_history.append({"role": "system", "content": system_message})
        conversation_history.append({"role": "user", "content": request.message})
        completion = self.llm.process_chat_completion(conversation_history, [])
        return completion.choices[0].message.content

    def process_response(self, response : WiseAgentMessage):
        """Responses are ignored by this agent."""
        return True

    def stop(self):
        """No resources to release; intentionally a no-op."""

    @property
    def name(self) -> str:
        """Get the name of the agent."""
        return self._name

name: str property

Get the name of the agent.

__init__(name, metadata, llm, transport)

Initialize the agent.

Parameters:
  • name (str) –

    the name of the agent

  • metadata (WiseAgentMetaData) –

    the metadata for the agent

  • llm (WiseAgentLLM) –

    the LLM agent to use for processing requests

  • transport (WiseAgentTransport) –

    the transport to use for communication

Source code in wiseagents/agents/utility_wise_agents.py
113
114
115
116
117
118
119
120
121
122
123
124
def __init__(self, name: str, metadata: WiseAgentMetaData, llm : WiseAgentLLM, transport: WiseAgentTransport):
    """
    Initialize the agent by delegating to the base class initializer.

    Args:
        name (str): the name of the agent
        metadata (WiseAgentMetaData): the metadata for the agent
        llm (WiseAgentLLM): the LLM agent to use for processing requests
        transport (WiseAgentTransport): the transport to use for communication
    """
    super().__init__(name=name, metadata=metadata, transport=transport, llm=llm)

__new__(*args, **kwargs)

Create a new instance of the class, setting default values for the instance variables.

Source code in wiseagents/agents/utility_wise_agents.py
108
109
110
111
def __new__(cls, *args, **kwargs):
    """Create a new instance of the class, setting default values for the instance variables."""
    # No optional instance variables need defaults for this agent type.
    return super().__new__(cls)

__repr__()

Return a string representation of the agent.

Source code in wiseagents/agents/utility_wise_agents.py
126
127
128
def __repr__(self):
    """Return a one-line string representation listing the agent's name, metadata, LLM, and transport."""
    return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm}, transport={self.transport})")

process_error(error)

Log the error and return True.

Source code in wiseagents/agents/utility_wise_agents.py
134
135
136
137
def process_error(self, error):
    """Record the error in the log and report it as handled."""
    logging.error(error)
    return True

process_event(event)

Do nothing

Source code in wiseagents/agents/utility_wise_agents.py
130
131
132
def process_event(self, event):
    """Events are ignored by this agent."""
    return True

process_request(request, conversation_history)

Process a request message by passing it to the LLM.

Parameters:
  • request (WiseAgentMessage) –

    the request message to process

  • conversation_history (List[ChatCompletionMessageParam]) –

    The conversation history that can be used while processing the request; this list is empty when the agent is not involved in a collaboration that tracks history.

Returns:
  • Optional[str]

    Optional[str]: the response to the request message as a string or None if there is

  • Optional[str]

    no string response yet

Source code in wiseagents/agents/utility_wise_agents.py
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
def process_request(self, request: WiseAgentMessage, conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
    """
    Answer the request by appending it (plus any configured system message) to the
    conversation history and asking the LLM for a chat completion.

    Args:
        request (WiseAgentMessage): the request message to process
        conversation_history (List[ChatCompletionMessageParam]): the conversation history
        available while processing the request; empty when this agent is not part of a
        collaboration that tracks history

    Returns:
        Optional[str]: the response to the request message as a string or None if there is
        no string response yet
    """
    # The agent-level system message takes precedence over the LLM's own.
    system_message = self.metadata.system_message or self.llm.system_message
    if system_message:
        conversation_history.append({"role": "system", "content": system_message})
    conversation_history.append({"role": "user", "content": request.message})
    completion = self.llm.process_chat_completion(conversation_history, [])
    return completion.choices[0].message.content

process_response(response)

Do nothing

Source code in wiseagents/agents/utility_wise_agents.py
159
160
161
def process_response(self, response : WiseAgentMessage):
    """No-op handler for response messages; always reports success."""
    return True

LLMWiseAgentWithTools

Bases: WiseAgent

This utility agent makes use of an LLM along with tools to process a request and determine the response to send back to the client.

Source code in wiseagents/agents/utility_wise_agents.py
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
class LLMWiseAgentWithTools(WiseAgent):
    """
    This utility agent makes use of an LLM along with tools to process a request and determine the response
    to send back to the client.
    """
    yaml_tag = u'!wiseagents.agents.LLMWiseAgentWithTools'

    def __new__(cls, *args, **kwargs):
        """Create a new instance of the class, setting default values for the instance variables."""
        obj = super().__new__(cls)
        return obj

    def __init__(self, name: str, metadata: WiseAgentMetaData, llm : WiseAgentLLM, transport: WiseAgentTransport, tools: List[str]):
        """
        Initialize the agent.

        Args:
            name (str): the name of the agent
            metadata (WiseAgentMetaData): the metadata for the agent
            llm (WiseAgentLLM): the LLM agent to use for processing requests
            transport (WiseAgentTransport): the transport to use for communication
            tools (List[str]): the names of the registered tools the LLM may invoke
        """
        # Tools are stored by name and resolved through WiseAgentRegistry at request time.
        self._tools = tools
        super().__init__(name=name, metadata=metadata, transport=transport, llm=llm)

    def __repr__(self):
        """Return a string representation of the agent."""
        # Fixed: the f-string previously omitted the closing ')', producing an unbalanced repr.
        return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm}, transport={self.transport})")

    def process_event(self, event):
        """Do nothing"""
        return True

    def process_error(self, error):
        """Log the error and return True."""
        logging.error(error)
        return True

    def process_request(self, request: WiseAgentMessage, conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
        """
        Process a request message by passing it to the LLM agent.
        It also invokes tool(s) if required. Tool(s) could be a callback function or another agent.

        Args:
            request (WiseAgentMessage): the request message to process
            conversation_history (List[ChatCompletionMessageParam]): The conversation history that
            can be used while processing the request. If this agent isn't involved in a type of
            collaboration that makes use of the conversation history, this will be an empty list.

        Returns:
            Optional[str]: the response to the request message as a string or None if there is
            no string response yet (i.e. asynchronous agent tool calls are still outstanding)
        """
        # A per-request sub-context isolates this chat from the parent conversation.
        sub_ctx_name = f'{self.name}.{str(uuid.uuid4())}'
        ctx = WiseAgentRegistry.create_sub_context(request.context_name,sub_ctx_name)
        if self.llm.system_message:
            ctx.append_chat_completion(messages= {"role": "system", "content": self.llm.system_message})
        ctx.append_chat_completion(messages= {"role": "user", "content": request.message})

        for tool in self._tools:
            ctx.append_available_tool_in_chat(tools=WiseAgentRegistry.get_tool(tool).get_tool_OpenAI_format())

        logging.debug(f"messages: {ctx.llm_chat_completion}, Tools: {ctx.llm_available_tools_in_chat}")
        # TODO: https://github.com/wise-agents/wise-agents/issues/205
        llm_response = self.llm.process_chat_completion(ctx.llm_chat_completion, ctx.llm_available_tools_in_chat)

        ##calling tool
        response_message = llm_response.choices[0].message
        tool_calls = response_message.tool_calls
        logging.debug(f"Tool calls: {tool_calls}")
        logging.debug(f"Response message: {response_message}")
        # Step 2: check if the model wanted to call a function
        if tool_calls is not None:
            # Step 3: call the function
            # TODO: the JSON response may not always be valid; be sure to handle errors
            ctx.append_chat_completion(messages= response_message)  # extend conversation with assistant's reply

            # Record every required tool call up front so completion tracking stays
            # accurate even when some calls are dispatched asynchronously to other agents.
            for tool_call in tool_calls:
                #record the required tool call in the context/chatid
                ctx.append_required_tool_call(tool_name=tool_call.function.name)

            # Step 4: send the info for each function call and function response to the model
            for tool_call in tool_calls:
                function_name = tool_call.function.name
                wise_agent_tool : WiseAgentTool = WiseAgentRegistry.get_tool(function_name)
                if wise_agent_tool.is_agent_tool:
                    #call the agent with correlation ID and complete the chat on response
                    self.send_request(WiseAgentMessage(message=tool_call.function.arguments, sender=self.name, 
                                                       tool_id=tool_call.id, context_name=ctx.name,
                                                       route_response_to=request.sender), 
                                      dest_agent_name=function_name)
                else:
                    function_args = json.loads(tool_call.function.arguments)
                    function_response = wise_agent_tool.exec(**function_args)
                    logging.debug(f"Function response: {function_response}")
                    ctx.append_chat_completion(messages= 
                        {
                            "tool_call_id": tool_call.id,
                            "role": "tool",
                            "name": function_name,
                            "content": function_response,
                        }
                    )  # extend conversation with function response
                    ctx.remove_required_tool_call(tool_name=tool_call.function.name)


        #SEND THE RESPONSE IF NOT ASYNC, OTHERWISE WE WILL DO LATER IN PROCESS_RESPONSE
        if ctx.llm_required_tool_call == []: # if all tool calls have been completed (no asynch needed)
            llm_response = self.llm.process_chat_completion(ctx.llm_chat_completion, 
                                                            ctx.llm_available_tools_in_chat)
            response_message = llm_response.choices[0].message
            logging.debug(f"sending response {response_message.content} to: {request.sender}")
            WiseAgentRegistry.remove_context(context_name=ctx.name, merge_chat_to_parent=False)
            return response_message.content


    def process_response(self, response : WiseAgentMessage):
        """
        Process a response message and send the response back to the client.
        It also invokes the tool if required. The tool could be a callback function or another agent.

        Args:
            response (WiseAgentMessage): the response message to process
        """
        logging.getLogger(self.name).info(f"Response received: {response}")
        ctx = WiseAgentRegistry.get_context(response.context_name)
        ctx.append_chat_completion(messages= 
            {
                "tool_call_id": response.tool_id,
                "role": "tool",
                "name": response.sender,
                "content": response.message,
            }
        )  # extend conversation with function response
        ctx.remove_required_tool_call(tool_name=response.sender)

        if ctx.llm_required_tool_call == []: # if all tool calls have been completed (no asynch needed)
            llm_response = self.llm.process_chat_completion(ctx.llm_chat_completion, 
                                                            ctx.llm_available_tools_in_chat)
            response_message = llm_response.choices[0].message
            logging.getLogger(self.name).info(f"sending response {response_message.content} to: {response.route_response_to}")
            parent_context = WiseAgentRegistry.remove_context(context_name=response.context_name, merge_chat_to_parent=True)
            self.send_response(WiseAgentMessage(message=response_message.content, sender=self.name, context_name=parent_context.name), response.route_response_to )
            return True

    def stop(self):
        """Do nothing"""
        pass

    @property
    def name(self) -> str:
        """Get the name of the agent."""
        return self._name

name: str property

Get the name of the agent.

__init__(name, metadata, llm, transport, tools)

Initialize the agent.

Parameters:
  • name (str) –

    the name of the agent

  • metadata (WiseAgentMetaData) –

    the metadata for the agent

  • llm (WiseAgentLLM) –

    the LLM agent to use for processing requests

  • transport (WiseAgentTransport) –

    the transport to use for communication

Source code in wiseagents/agents/utility_wise_agents.py
184
185
186
187
188
189
190
191
192
193
194
195
196
def __init__(self, name: str, metadata: WiseAgentMetaData, llm : WiseAgentLLM, transport: WiseAgentTransport, tools: List[str]):
    """
    Initialize the agent.

    Args:
        name (str): the name of the agent
        metadata (WiseAgentMetaData): the metadata for the agent
        llm (WiseAgentLLM): the LLM agent to use for processing requests
        transport (WiseAgentTransport): the transport to use for communication
        tools (List[str]): the names of the registered tools the LLM may invoke

    """
    # Tools are stored by name; they are resolved via WiseAgentRegistry at request time.
    self._tools = tools
    super().__init__(name=name, metadata=metadata, transport=transport, llm=llm)

__new__(*args, **kwargs)

Create a new instance of the class, setting default values for the instance variables.

Source code in wiseagents/agents/utility_wise_agents.py
179
180
181
182
def __new__(cls, *args, **kwargs):
    """Create a new instance of the class, setting default values for the instance variables."""
    # NOTE(review): no defaults are actually assigned here, unlike the sibling agents'
    # __new__ methods — confirm whether that is intentional.
    obj = super().__new__(cls)
    return obj

__repr__()

Return a string representation of the agent.

Source code in wiseagents/agents/utility_wise_agents.py
198
199
200
def __repr__(self):
    """Return a string representation of the agent."""
    # Fixed: the f-string previously omitted the closing ')', producing an
    # unbalanced repr such as "Cls(name=..., transport=...".
    return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm}, transport={self.transport})")

process_error(error)

Log the error and return True.

Source code in wiseagents/agents/utility_wise_agents.py
206
207
208
209
def process_error(self, error):
    """Record the error in the log and treat it as handled."""
    logging.error(error)
    return True

process_event(event)

Do nothing

Source code in wiseagents/agents/utility_wise_agents.py
202
203
204
def process_event(self, event):
    """Accept and discard the event; nothing to do for this agent."""
    return True

process_request(request, conversation_history)

Process a request message by passing it to the LLM agent. It also invokes tool(s) if required. Tool(s) could be a callback function or another agent.

Parameters:
  • request (WiseAgentMessage) –

    the request message to process

  • conversation_history (List[ChatCompletionMessageParam]) –

    The conversation history that can be used while processing the request; an empty list if this agent is not involved in a collaboration that tracks history

Returns: Optional[str] — the response to the request message as a string, or None if there is no string response yet

Source code in wiseagents/agents/utility_wise_agents.py
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
def process_request(self, request: WiseAgentMessage, conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
    """
    Process a request message by passing it to the LLM agent.
    It also invokes tool(s) if required. Tool(s) could be a callback function or another agent.

    Args:
        request (WiseAgentMessage): the request message to process
        conversation_history (List[ChatCompletionMessageParam]): The conversation history that
        can be used while processing the request. If this agent isn't involved in a type of
        collaboration that makes use of the conversation history, this will be an empty list.

    Returns:
        Optional[str]: the response to the request message as a string or None if there is
        no string response yet (i.e. asynchronous agent tool calls are still outstanding)
    """
    # A per-request sub-context isolates this chat from the parent conversation.
    # NOTE(review): conversation_history is not used here; the sub-context holds the chat instead.
    sub_ctx_name = f'{self.name}.{str(uuid.uuid4())}'
    ctx = WiseAgentRegistry.create_sub_context(request.context_name,sub_ctx_name)
    if self.llm.system_message:
        ctx.append_chat_completion(messages= {"role": "system", "content": self.llm.system_message})
    ctx.append_chat_completion(messages= {"role": "user", "content": request.message})

    # Advertise every configured tool to the LLM in OpenAI function-call format.
    for tool in self._tools:
        ctx.append_available_tool_in_chat(tools=WiseAgentRegistry.get_tool(tool).get_tool_OpenAI_format())

    logging.debug(f"messages: {ctx.llm_chat_completion}, Tools: {ctx.llm_available_tools_in_chat}")
    # TODO: https://github.com/wise-agents/wise-agents/issues/205
    llm_response = self.llm.process_chat_completion(ctx.llm_chat_completion, ctx.llm_available_tools_in_chat)

    ##calling tool
    response_message = llm_response.choices[0].message
    tool_calls = response_message.tool_calls
    logging.debug(f"Tool calls: {tool_calls}")
    logging.debug(f"Response message: {response_message}")
    # Step 2: check if the model wanted to call a function
    if tool_calls is not None:
        # Step 3: call the function
        # TODO: the JSON response may not always be valid; be sure to handle errors
        ctx.append_chat_completion(messages= response_message)  # extend conversation with assistant's reply

        # Step 4: send the info for each function call and function response to the model.
        # All required calls are recorded first so that completion tracking stays
        # accurate even when some are dispatched asynchronously to other agents.
        for tool_call in tool_calls:
            #record the required tool call in the context/chatid
            ctx.append_required_tool_call(tool_name=tool_call.function.name)

        for tool_call in tool_calls:
            function_name = tool_call.function.name
            wise_agent_tool : WiseAgentTool = WiseAgentRegistry.get_tool(function_name)
            if wise_agent_tool.is_agent_tool:
                #call the agent with correlation ID and complete the chat on response
                self.send_request(WiseAgentMessage(message=tool_call.function.arguments, sender=self.name, 
                                                   tool_id=tool_call.id, context_name=ctx.name,
                                                   route_response_to=request.sender), 
                                  dest_agent_name=function_name)
            else:
                # Plain callback tool: execute synchronously and feed the result back.
                function_args = json.loads(tool_call.function.arguments)
                function_response = wise_agent_tool.exec(**function_args)
                logging.debug(f"Function response: {function_response}")
                ctx.append_chat_completion(messages= 
                    {
                        "tool_call_id": tool_call.id,
                        "role": "tool",
                        "name": function_name,
                        "content": function_response,
                    }
                )  # extend conversation with function response
                ctx.remove_required_tool_call(tool_name=tool_call.function.name)


    #SEND THE RESPONSE IF NOT ASYNC, OTHERWISE WE WILL DO LATER IN PROCESS_RESPONSE
    if ctx.llm_required_tool_call == []: # if all tool calls have been completed (no asynch needed)
        llm_response = self.llm.process_chat_completion(ctx.llm_chat_completion, 
                                                        ctx.llm_available_tools_in_chat)
        response_message = llm_response.choices[0].message
        logging.debug(f"sending response {response_message.content} to: {request.sender}")
        WiseAgentRegistry.remove_context(context_name=ctx.name, merge_chat_to_parent=False)
        return response_message.content

process_response(response)

Process a response message and send the response back to the client. It also invokes the tool if required. The tool could be a callback function or another agent.

Parameters:
Source code in wiseagents/agents/utility_wise_agents.py
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
def process_response(self, response : WiseAgentMessage):
    """
    Process a response message and send the response back to the client.
    It also invokes the tool if required. The tool could be a callback function or another agent.

    Args:
        response (WiseAgentMessage): the response message to process
    """
    logging.getLogger(self.name).info(f"Response received: {response}")
    # Look up the sub-context created for this request by its correlation name.
    ctx = WiseAgentRegistry.get_context(response.context_name)
    ctx.append_chat_completion(messages= 
        {
            "tool_call_id": response.tool_id,
            "role": "tool",
            "name": response.sender,
            "content": response.message,
        }
    )  # extend conversation with function response
    ctx.remove_required_tool_call(tool_name=response.sender)

    # Only finish the chat once every outstanding (asynchronous) tool call has replied.
    if ctx.llm_required_tool_call == []: # if all tool calls have been completed (no asynch needed)
        llm_response = self.llm.process_chat_completion(ctx.llm_chat_completion, 
                                                        ctx.llm_available_tools_in_chat)
        response_message = llm_response.choices[0].message
        logging.getLogger(self.name).info(f"sending response {response_message.content} to: {response.route_response_to}")
        parent_context = WiseAgentRegistry.remove_context(context_name=response.context_name, merge_chat_to_parent=True)
        self.send_response(WiseAgentMessage(message=response_message.content, sender=self.name, context_name=parent_context.name), response.route_response_to )
        return True

stop()

Do nothing

Source code in wiseagents/agents/utility_wise_agents.py
317
318
319
def stop(self):
    """No teardown is required for this agent."""
    return None

PassThroughClientAgent

Bases: WiseAgent

This utility agent simply passes a request that it receives to another agent and sends the response back to the client.

Source code in wiseagents/agents/utility_wise_agents.py
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
class PassThroughClientAgent(WiseAgent):
    """
    This utility agent simply passes a request that it receives to another agent and sends the
    response back to the client.
    """
    yaml_tag = u'!wiseagents.agents.PassThroughClientAgent'

    _response_delivery = None
    _destination_agent_name = None

    def __new__(cls, *args, **kwargs):
        """Create a new instance of the class, setting default values for the optional instance variables."""
        obj = super().__new__(cls)
        obj._destination_agent_name = "WiseIntelligentAgent"
        return obj

    def __init__(self, name: str, metadata: WiseAgentMetaData , transport: WiseAgentTransport,
                 destination_agent_name: Optional[str] = "WiseIntelligentAgent"):
        """
        Initialize the agent.

        Args:
            name (str): the name of the agent
            metadata (WiseAgentMetaData): the metadata for the agent
            transport (WiseAgentTransport): the transport to use for communication
            destination_agent_name (str): the name of the agent to send requests to
        """
        self._name = name
        self._destination_agent_name = destination_agent_name
        # No LLM is needed: this agent only forwards messages.
        super().__init__(name=name, metadata=metadata, transport=transport, llm=None)

    def __repr__(self):
        """Return a string representation of the agent."""
        # Fixed: closing ')' added; the original also embedded long runs of spaces in
        # the repr through backslash line continuations inside the string literal.
        return (f"{self.__class__.__name__}(name={self.name}, "
                f"metadata={self.metadata}, transport={self.transport}, "
                f"destination_agent_name={self.destination_agent_name}, "
                f"response_delivery={self.response_delivery})")

    def process_request(self, request: WiseAgentMessage,
                        conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
        """Process a request message by just passing it to another agent."""
        # NOTE(review): the whole WiseAgentMessage object is forwarded as the new
        # message's payload (message=request), not request.message — confirm intended.
        self.send_request(WiseAgentMessage(message=request, sender=self.name, context_name=request.context_name), self.destination_agent_name)
        return None

    def process_response(self, response):
        """Process a response message just sending it back to the client."""
        if self.response_delivery is not None:
            self.response_delivery(response)
        else:
            logging.debug(f"############################### Not sending response {response}")
        return True

    def process_event(self, event):
        """Do nothing"""
        return True

    def process_error(self, error):
        """Do nothing"""
        return True

    def stop(self):
        """Do nothing"""
        pass

    @property
    def name(self) -> str:
        """Get the name of the agent."""
        return self._name

    @property
    def destination_agent_name(self) -> str:
        """Get the name of the agent to send requests to."""
        return self._destination_agent_name

    @property
    def response_delivery(self) -> Optional[Callable[[WiseAgentMessage], None]]:
        """Get the function used to deliver a response to the client.

        Returns:
            Optional[Callable[[WiseAgentMessage], None]]: the delivery callback, or None
        """
        # Annotation corrected: process_response invokes this as response_delivery(response),
        # so it takes one WiseAgentMessage argument rather than no arguments.
        return self._response_delivery

    def set_response_delivery(self, response_delivery: Callable[[WiseAgentMessage], None]):
        """
        Set the function to deliver the response to the client.

        Args:
            response_delivery (Callable[[WiseAgentMessage], None]): the function that will be
            called with each received response message
        """
        self._response_delivery = response_delivery

destination_agent_name: str property

Get the name of the agent to send requests to.

name: str property

Get the name of the agent.

response_delivery: Optional[Callable[[], WiseAgentMessage]] property

Get the function to deliver the response to the client. return (Callable[[], WiseAgentMessage]): the function to deliver the response to the client

__init__(name, metadata, transport, destination_agent_name='WiseIntelligentAgent')

Initialize the agent.

Parameters:
  • name (str) –

    the name of the agent

  • metadata (WiseAgentMetaData) –

    the metadata for the agent

  • transport (WiseAgentTransport) –

    the transport to use for communication

  • destination_agent_name (str, default: 'WiseIntelligentAgent' ) –

    the name of the agent to send requests to

Source code in wiseagents/agents/utility_wise_agents.py
28
29
30
31
32
33
34
35
36
37
38
39
40
41
def __init__(self, name: str, metadata: WiseAgentMetaData , transport: WiseAgentTransport,
             destination_agent_name: Optional[str] = "WiseIntelligentAgent"):
    """
    Initialize the agent.

    Args:
        name (str): the name of the agent
        metadata (WiseAgentMetaData): the metadata for the agent
        transport (WiseAgentTransport): the transport to use for communication
        destination_agent_name (str): the name of the agent to send requests to
    """
    self._name = name
    self._destination_agent_name = destination_agent_name
    # No LLM is needed: this agent only forwards messages.
    super().__init__(name=name, metadata=metadata, transport=transport, llm=None)

__new__(*args, **kwargs)

Create a new instance of the class, setting default values for the optional instance variables.

Source code in wiseagents/agents/utility_wise_agents.py
22
23
24
25
26
def __new__(cls, *args, **kwargs):
    """Create a new instance of the class, setting default values for the optional instance variables."""
    obj = super().__new__(cls)
    # Default destination; __init__ normally overrides this value.
    obj._destination_agent_name = "WiseIntelligentAgent"
    return obj

__repr__()

Return a string representation of the agent.

Source code in wiseagents/agents/utility_wise_agents.py
43
44
45
46
47
48
def __repr__(self):
    """Return a string representation of the agent."""
    # Fixed: closing ')' added; the original also embedded long runs of spaces in
    # the repr through backslash line continuations inside the string literal.
    return (f"{self.__class__.__name__}(name={self.name}, "
            f"metadata={self.metadata}, transport={self.transport}, "
            f"destination_agent_name={self.destination_agent_name}, "
            f"response_delivery={self.response_delivery})")

process_error(error)

Do nothing

Source code in wiseagents/agents/utility_wise_agents.py
68
69
70
def process_error(self, error):
    """Ignore the error and report it as handled."""
    return True

process_event(event)

Do nothing

Source code in wiseagents/agents/utility_wise_agents.py
64
65
66
def process_event(self, event):
    """Silently accept the event; no action is taken."""
    return True

process_request(request, conversation_history)

Process a request message by just passing it to another agent.

Source code in wiseagents/agents/utility_wise_agents.py
50
51
52
53
54
def process_request(self, request: WiseAgentMessage,
                    conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
    """Process a request message by just passing it to another agent."""
    # NOTE(review): the whole WiseAgentMessage object is forwarded as the new
    # message's payload (message=request), not request.message — confirm intended.
    self.send_request(WiseAgentMessage(message=request, sender=self.name, context_name=request.context_name), self.destination_agent_name)
    return None

process_response(response)

Process a response message just sending it back to the client.

Source code in wiseagents/agents/utility_wise_agents.py
56
57
58
59
60
61
62
def process_response(self, response):
    """Hand the response over to the client's delivery callback, if one is configured."""
    delivery = self.response_delivery
    if delivery is None:
        logging.debug(f"############################### Not sending response {response}")
    else:
        delivery(response)
    return True

set_response_delivery(response_delivery)

Set the function to deliver the response to the client.

Parameters:
  • response_delivery (Callable[[], WiseAgentMessage]) –

    the function to deliver the response to the client

Source code in wiseagents/agents/utility_wise_agents.py
92
93
94
95
96
97
98
99
def set_response_delivery(self, response_delivery: Callable[[], WiseAgentMessage]):
    """
    Set the function to deliver the response to the client.

    Args:
        response_delivery (Callable[[], WiseAgentMessage]): the function to deliver the response to the client
    """
    # NOTE(review): the callback is invoked as response_delivery(response), so the
    # annotation should arguably be Callable[[WiseAgentMessage], None] — confirm.
    self._response_delivery = response_delivery

stop()

Do nothing

Source code in wiseagents/agents/utility_wise_agents.py
72
73
74
def stop(self):
    """Nothing to clean up; this agent holds no external resources."""
    return None

PhasedCoordinatorWiseAgent

Bases: WiseAgent

This agent will coordinate the execution of a group of agents in order to determine the response to a query. The agents will be executed in phases, where agents within a phase will be executed in parallel. After the phases have completed, the coordinator may choose to repeat the phases until it is satisfied with the final response or determines it's not possible to answer the query.

Source code in wiseagents/agents/coordinator_wise_agents.py
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
class PhasedCoordinatorWiseAgent(WiseAgent):
    """
    This agent will coordinate the execution of a group of agents in order to determine the response
    to a query. The agents will be executed in phases, where agents within a phase will be executed
    in parallel. After the phases have completed, the coordinator may choose to repeat the phases
    until it is satisfied with the final response or determines it's not possible to answer the query.
    """
    yaml_tag = u'!wiseagents.agents.PhasedCoordinatorWiseAgent'

    def __new__(cls, *args, **kwargs):
        """Create a new instance of the class, setting default values for the instance variables."""
        obj = super().__new__(cls)
        obj._phases = ["Data Collection", "Data Analysis"]
        obj._max_iterations = MAX_ITERATIONS_FOR_COORDINATOR
        obj._confidence_score_threshold = CONFIDENCE_SCORE_THRESHOLD
        return obj

    def __init__(self, name: str, metadata: WiseAgentMetaData, transport: WiseAgentTransport, llm: WiseAgentLLM,
                 phases: Optional[List[str]] = None, max_iterations: Optional[int] = MAX_ITERATIONS_FOR_COORDINATOR,
                 confidence_score_threshold: Optional[int] = CONFIDENCE_SCORE_THRESHOLD):
        """
        Initialize the agent.

        Args:
            name (str): the name of the agent
            metadata (WiseAgentMetaData): the metadata for the agent
            transport (WiseAgentTransport): the transport to use for communication
            llm (WiseAgentLLM): the LLM to use for coordinating the collaboration
            phases (Optional[List[str]]): the optional list of phase names, defaults to "Data Collection" and "Data Analysis"
            max_iterations (Optional[int]): the maximum number of iterations to run the phases, defaults to 5
            confidence_score_threshold (Optional[int]): the confidence score threshold to determine if the final answer
            is acceptable, defaults to 85
        """
        self._name = name
        self._phases = phases if phases is not None else ["Data Collection", "Data Analysis"]
        self._max_iterations = max_iterations
        self._confidence_score_threshold = confidence_score_threshold
        super().__init__(name=name, metadata=metadata, transport=transport, llm=llm)

    def __repr__(self):
        """Return a string representation of the agent."""
        return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, transport={self.transport},"
                f"llm={self.llm}, phases={self.phases},max_iterations={self.max_iterations}")

    @property
    def phases(self) -> List[str]:
        """Get the list of phases."""
        return self._phases

    @property
    def max_iterations(self) -> int:
        """Get the maximum number of iterations."""
        return self._max_iterations

    @property
    def confidence_score_threshold(self) -> int:
        """Get the confidence score threshold."""
        return self._confidence_score_threshold

    def handle_request(self, request):
        """
        Process a request message by kicking off the collaboration in phases.

        Args:
            request (WiseAgentMessage): the request message to process
        """
        logging.debug(f"Coordinator received request: {request}")

        # Generate a chat ID that will be used to collaborate on this query
        sub_ctx_name = f'{self.name}.{str(uuid.uuid4())}'

        ctx = WiseAgentRegistry.create_sub_context(request.context_name, sub_ctx_name)
        ctx.set_collaboration_type(WiseAgentCollaborationType.PHASED)
        ctx.set_route_response_to(request.sender)
        logging.debug(f"Registred context: {WiseAgentRegistry.get_context(ctx.name)}")
        # Determine the agents required to answer the query
        agent_selection_prompt = ("Given the following query and a description of the agents that are available," +
                                  " determine all of the agents that could be required to solve the query." +
                                  " Format the response as a space separated list of agent names and don't include " +
                                  " anything else in the response.\n" +
                                  " Query: " + request.message + "\n" + "Available agents:\n" +
                                  "\n".join(WiseAgentRegistry.get_agent_names_and_descriptions()) + "\n")
        if self.metadata.system_message or self.llm.system_message:
            ctx.append_chat_completion(messages={"role": "system", "content": self.metadata.system_message or self.llm.system_message})
        ctx.append_chat_completion(messages={"role": "user", "content": agent_selection_prompt})

        logging.debug(f"messages: {ctx.llm_chat_completion}")
        llm_response = self.llm.process_chat_completion(ctx.llm_chat_completion, tools=[])
        ctx.append_chat_completion(messages=llm_response.choices[0].message)

        # Assign the agents to phases
        agent_assignment_prompt = ("Assign each of the agents that will be required to solve the query to one of the following phases:\n" +
                                   ", ".join(self.phases) + "\n" +
                                   "Assume that agents within a phase will be executed in parallel." +
                                   " Format the response as a space separated list of agents for each phase, where the first"
                                   " line contains the list of agents for the first phase and second line contains the list of"
                                   " agents for the second phase and so on. Don't include anything else in the response.\n")
        ctx.append_chat_completion(messages={"role": "user", "content": agent_assignment_prompt})
        llm_response = self.llm.process_chat_completion(ctx.llm_chat_completion, tools=[])
        ctx.append_chat_completion(messages=llm_response.choices[0].message)     
        phases = [phase.split() for phase in llm_response.choices[0].message.content.splitlines()]
        ctx.set_agent_phase_assignments(phases)
        ctx.set_current_phase(0)
        ctx.add_query(request.message)

        # Kick off the first phase
        for agent in phases[0]:
            self.send_request(WiseAgentMessage(message=request.message, sender=self.name,
                                               context_name=ctx.name), agent)

    def process_response(self, response : WiseAgentMessage):
        """
        Process a response message. If this message is from the last agent remaining in the current phase, then
        kick off the next phase of collaboration if there are more phases. Otherwise, determine if we should
        return the final answer or if we need to go back to the first phase and repeat with a rephrased query.

        Args:
            response (WiseAgentMessage): the response message to process
        """
        ctx = WiseAgentRegistry.get_context(response.context_name)

        if response.message_type != WiseAgentMessageType.ACK:
            raise ValueError(f"Unexpected response message_type: {response.message_type} with message: {response.message}")

        # Remove the agent from the required agents for this phase
        ctx.remove_required_agent_for_current_phase(response.sender)

        # If there are no more agents remaining in this phase, move on to the next phase,
        # return the final answer, or iterate
        if len(ctx.get_required_agents_for_current_phase()) == 0:
            next_phase = ctx.get_agents_for_next_phase()
            if next_phase is None:
                # Determine the final answer
                final_answer_prompt = ("What is the final answer for the original query? Provide the answer followed" +
                                       " by a confidence score from 0 to 100 to indicate how certain you are of the" +
                                       " answer. Format the response with just the answer first followed by just" +
                                       " the confidence score on the next line. For example:\n" +
                                       " Your answer goes here.\n"
                                       " 85\n")
                ctx.append_chat_completion(messages={"role": "user", "content": final_answer_prompt})
                llm_response = self.llm.process_chat_completion(ctx.llm_chat_completion, tools=[])
                final_answer_and_score = llm_response.choices[0].message.content.splitlines()
                final_answer = "\n".join(final_answer_and_score[:-1])
                if final_answer_and_score[-1].strip().isnumeric():
                    score = int(final_answer_and_score[-1])
                else:
                    # A score could not be determined
                    score = 0

                # Determine if we should return the final answer or iterate
                if score >= self.confidence_score_threshold:
                    self.send_response(WiseAgentMessage(message=final_answer, sender=self.name,
                                                        context_name=response.context_name), ctx.get_route_response_to())
                elif len(ctx.get_queries()) == self.max_iterations:
                    self.send_response(WiseAgentMessage(message=CANNOT_ANSWER, message_type=WiseAgentMessageType.CANNOT_ANSWER,
                                                        sender=self.name, context_name=response.context_name),
                                       ctx.get_route_response_to())
                else:
                    # Rephrase the query and iterate
                    if len(ctx.get_queries()) < self.max_iterations:
                        rephrase_query_prompt = ("The final answer was not considered good enough to respond to the original query.\n" +
                                                 " The original query was: " + ctx.get_queries()[0] + "\n" +
                                                 " Your task is to analyze the original query for its intent along with the conversation" +
                                                 " history and final answer to rephrase the original query to yield a better final answer." +
                                                 " The response should contain only the rephrased query."
                                                 " Don't include anything else in the response.\n")
                        ctx.append_chat_completion(messages={"role": "user", "content": rephrase_query_prompt})
                        # Note that llm_chat_completion is being used here so we have the full history
                        llm_response = self.llm.process_chat_completion(ctx.llm_chat_completion, tools=[])
                        rephrased_query = llm_response.choices[0].message.content
                        ctx.append_chat_completion(messages=llm_response.choices[0].message)
                        ctx.set_current_phase(0)
                        ctx.add_query(rephrased_query)
                        for agent in ctx.get_required_agents_for_current_phase():
                            self.send_request(WiseAgentMessage(message=rephrased_query, sender=self.name,
                                                               context_name=response.context_name),
                                              agent)
            else:
                # Kick off the next phase
                for agent in next_phase:
                    self.send_request(WiseAgentMessage(message=ctx.get_current_query(), sender=self.name,
                                                       context_name=response.context_name), agent)
        return True

    def process_event(self, event):
        """Do nothing"""
        return True

    def process_error(self, error):
        """Log the error and return True."""
        logging.error(error)
        return True

    def stop(self):
        """Do nothing"""
        pass

    @property
    def name(self) -> str:
        """Get the name of the agent."""
        return self._name

    @property
    def response_delivery(self) -> Optional[Callable[[], WiseAgentMessage]]:
        """
        Get the function to deliver the response to the client.
        Returns:
            (Callable[[], WiseAgentMessage]): the function to deliver the response to the client
        """
        return self._response_delivery

    def set_response_delivery(self, response_delivery: Callable[[], WiseAgentMessage]):
        """
        Set the function to deliver the response to the client.
        Args:
            response_delivery (Callable[[], WiseAgentMessage]): the function to deliver the response to the
        """
        self._response_delivery = response_delivery

confidence_score_threshold: int property

Get the confidence score threshold.

max_iterations: int property

Get the maximum number of iterations.

name: str property

Get the name of the agent.

phases: List[str] property

Get the list of phases.

response_delivery: Optional[Callable[[], WiseAgentMessage]] property

Get the function to deliver the response to the client. Returns: (Callable[[], WiseAgentMessage]): the function to deliver the response to the client

__init__(name, metadata, transport, llm, phases=None, max_iterations=MAX_ITERATIONS_FOR_COORDINATOR, confidence_score_threshold=CONFIDENCE_SCORE_THRESHOLD)

Initialize the agent.

Parameters:
  • name (str) –

    the name of the agent

  • metadata (WiseAgentMetaData) –

    the metadata for the agent

  • transport (WiseAgentTransport) –

    the transport to use for communication

  • llm (WiseAgentLLM) –

    the LLM to use for coordinating the collaboration

  • phases (Optional[List[str]], default: None ) –

    the optional list of phase names, defaults to "Data Collection" and "Data Analysis"

  • max_iterations (Optional[int], default: MAX_ITERATIONS_FOR_COORDINATOR ) –

    the maximum number of iterations to run the phases, defaults to 5

  • confidence_score_threshold (Optional[int], default: CONFIDENCE_SCORE_THRESHOLD ) –

    the confidence score threshold to determine if the final answer is acceptable, defaults to 85

Source code in wiseagents/agents/coordinator_wise_agents.py
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
def __init__(self, name: str, metadata: WiseAgentMetaData, transport: WiseAgentTransport, llm: WiseAgentLLM,
             phases: Optional[List[str]] = None, max_iterations: Optional[int] = MAX_ITERATIONS_FOR_COORDINATOR,
             confidence_score_threshold: Optional[int] = CONFIDENCE_SCORE_THRESHOLD):
    """
    Initialize the agent.

    Args:
        name (str): the name of the agent
        metadata (WiseAgentMetaData): the metadata for the agent
        transport (WiseAgentTransport): the transport to use for communication
        llm (WiseAgentLLM): the LLM to use for coordinating the collaboration
        phases (Optional[List[str]]): the optional list of phase names, defaults to "Data Collection" and "Data Analysis"
        max_iterations (Optional[int]): the maximum number of iterations to run the phases, defaults to 5
        confidence_score_threshold (Optional[int]): the confidence score threshold to determine if the final answer
        is acceptable, defaults to 85
    """
    self._name = name
    self._phases = phases if phases is not None else ["Data Collection", "Data Analysis"]
    self._max_iterations = max_iterations
    self._confidence_score_threshold = confidence_score_threshold
    super().__init__(name=name, metadata=metadata, transport=transport, llm=llm)

__new__(*args, **kwargs)

Create a new instance of the class, setting default values for the instance variables.

Source code in wiseagents/agents/coordinator_wise_agents.py
158
159
160
161
162
163
164
def __new__(cls, *args, **kwargs):
    """Create a new instance of the class, setting default values for the instance variables."""
    obj = super().__new__(cls)
    obj._phases = ["Data Collection", "Data Analysis"]
    obj._max_iterations = MAX_ITERATIONS_FOR_COORDINATOR
    obj._confidence_score_threshold = CONFIDENCE_SCORE_THRESHOLD
    return obj

__repr__()

Return a string representation of the agent.

Source code in wiseagents/agents/coordinator_wise_agents.py
188
189
190
191
def __repr__(self):
    """Return a string representation of the agent."""
    return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, transport={self.transport},"
            f"llm={self.llm}, phases={self.phases},max_iterations={self.max_iterations}")

handle_request(request)

Process a request message by kicking off the collaboration in phases.

Parameters:
  • request (WiseAgentMessage) –

    the request message to process

Source code in wiseagents/agents/coordinator_wise_agents.py
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
def handle_request(self, request):
    """
    Process a request message by kicking off the collaboration in phases.

    Args:
        request (WiseAgentMessage): the request message to process
    """
    logging.debug(f"Coordinator received request: {request}")

    # Generate a chat ID that will be used to collaborate on this query
    sub_ctx_name = f'{self.name}.{str(uuid.uuid4())}'

    ctx = WiseAgentRegistry.create_sub_context(request.context_name, sub_ctx_name)
    ctx.set_collaboration_type(WiseAgentCollaborationType.PHASED)
    ctx.set_route_response_to(request.sender)
    logging.debug(f"Registred context: {WiseAgentRegistry.get_context(ctx.name)}")
    # Determine the agents required to answer the query
    agent_selection_prompt = ("Given the following query and a description of the agents that are available," +
                              " determine all of the agents that could be required to solve the query." +
                              " Format the response as a space separated list of agent names and don't include " +
                              " anything else in the response.\n" +
                              " Query: " + request.message + "\n" + "Available agents:\n" +
                              "\n".join(WiseAgentRegistry.get_agent_names_and_descriptions()) + "\n")
    if self.metadata.system_message or self.llm.system_message:
        ctx.append_chat_completion(messages={"role": "system", "content": self.metadata.system_message or self.llm.system_message})
    ctx.append_chat_completion(messages={"role": "user", "content": agent_selection_prompt})

    logging.debug(f"messages: {ctx.llm_chat_completion}")
    llm_response = self.llm.process_chat_completion(ctx.llm_chat_completion, tools=[])
    ctx.append_chat_completion(messages=llm_response.choices[0].message)

    # Assign the agents to phases
    agent_assignment_prompt = ("Assign each of the agents that will be required to solve the query to one of the following phases:\n" +
                               ", ".join(self.phases) + "\n" +
                               "Assume that agents within a phase will be executed in parallel." +
                               " Format the response as a space separated list of agents for each phase, where the first"
                               " line contains the list of agents for the first phase and second line contains the list of"
                               " agents for the second phase and so on. Don't include anything else in the response.\n")
    ctx.append_chat_completion(messages={"role": "user", "content": agent_assignment_prompt})
    llm_response = self.llm.process_chat_completion(ctx.llm_chat_completion, tools=[])
    ctx.append_chat_completion(messages=llm_response.choices[0].message)     
    phases = [phase.split() for phase in llm_response.choices[0].message.content.splitlines()]
    ctx.set_agent_phase_assignments(phases)
    ctx.set_current_phase(0)
    ctx.add_query(request.message)

    # Kick off the first phase
    for agent in phases[0]:
        self.send_request(WiseAgentMessage(message=request.message, sender=self.name,
                                           context_name=ctx.name), agent)

process_error(error)

Log the error and return True.

Source code in wiseagents/agents/coordinator_wise_agents.py
337
338
339
340
def process_error(self, error):
    """Log the error and return True."""
    logging.error(error)
    return True

process_event(event)

Do nothing

Source code in wiseagents/agents/coordinator_wise_agents.py
333
334
335
def process_event(self, event):
    """Do nothing"""
    return True

process_response(response)

Process a response message. If this message is from the last agent remaining in the current phase, then kick off the next phase of collaboration if there are more phases. Otherwise, determine if we should return the final answer or if we need to go back to the first phase and repeat with a rephrased query.

Parameters:
  • response (WiseAgentMessage) –

    the response message to process

Source code in wiseagents/agents/coordinator_wise_agents.py
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
def process_response(self, response : WiseAgentMessage):
    """
    Process a response message. If this message is from the last agent remaining in the current phase, then
    kick off the next phase of collaboration if there are more phases. Otherwise, determine if we should
    return the final answer or if we need to go back to the first phase and repeat with a rephrased query.

    Args:
        response (WiseAgentMessage): the response message to process
    """
    ctx = WiseAgentRegistry.get_context(response.context_name)

    if response.message_type != WiseAgentMessageType.ACK:
        raise ValueError(f"Unexpected response message_type: {response.message_type} with message: {response.message}")

    # Remove the agent from the required agents for this phase
    ctx.remove_required_agent_for_current_phase(response.sender)

    # If there are no more agents remaining in this phase, move on to the next phase,
    # return the final answer, or iterate
    if len(ctx.get_required_agents_for_current_phase()) == 0:
        next_phase = ctx.get_agents_for_next_phase()
        if next_phase is None:
            # Determine the final answer
            final_answer_prompt = ("What is the final answer for the original query? Provide the answer followed" +
                                   " by a confidence score from 0 to 100 to indicate how certain you are of the" +
                                   " answer. Format the response with just the answer first followed by just" +
                                   " the confidence score on the next line. For example:\n" +
                                   " Your answer goes here.\n"
                                   " 85\n")
            ctx.append_chat_completion(messages={"role": "user", "content": final_answer_prompt})
            llm_response = self.llm.process_chat_completion(ctx.llm_chat_completion, tools=[])
            final_answer_and_score = llm_response.choices[0].message.content.splitlines()
            final_answer = "\n".join(final_answer_and_score[:-1])
            if final_answer_and_score[-1].strip().isnumeric():
                score = int(final_answer_and_score[-1])
            else:
                # A score could not be determined
                score = 0

            # Determine if we should return the final answer or iterate
            if score >= self.confidence_score_threshold:
                self.send_response(WiseAgentMessage(message=final_answer, sender=self.name,
                                                    context_name=response.context_name), ctx.get_route_response_to())
            elif len(ctx.get_queries()) == self.max_iterations:
                self.send_response(WiseAgentMessage(message=CANNOT_ANSWER, message_type=WiseAgentMessageType.CANNOT_ANSWER,
                                                    sender=self.name, context_name=response.context_name),
                                   ctx.get_route_response_to())
            else:
                # Rephrase the query and iterate
                if len(ctx.get_queries()) < self.max_iterations:
                    rephrase_query_prompt = ("The final answer was not considered good enough to respond to the original query.\n" +
                                             " The original query was: " + ctx.get_queries()[0] + "\n" +
                                             " Your task is to analyze the original query for its intent along with the conversation" +
                                             " history and final answer to rephrase the original query to yield a better final answer." +
                                             " The response should contain only the rephrased query."
                                             " Don't include anything else in the response.\n")
                    ctx.append_chat_completion(messages={"role": "user", "content": rephrase_query_prompt})
                    # Note that llm_chat_completion is being used here so we have the full history
                    llm_response = self.llm.process_chat_completion(ctx.llm_chat_completion, tools=[])
                    rephrased_query = llm_response.choices[0].message.content
                    ctx.append_chat_completion(messages=llm_response.choices[0].message)
                    ctx.set_current_phase(0)
                    ctx.add_query(rephrased_query)
                    for agent in ctx.get_required_agents_for_current_phase():
                        self.send_request(WiseAgentMessage(message=rephrased_query, sender=self.name,
                                                           context_name=response.context_name),
                                          agent)
        else:
            # Kick off the next phase
            for agent in next_phase:
                self.send_request(WiseAgentMessage(message=ctx.get_current_query(), sender=self.name,
                                                   context_name=response.context_name), agent)
    return True

set_response_delivery(response_delivery)

Set the function to deliver the response to the client. Args: response_delivery (Callable[[], WiseAgentMessage]): the function to deliver the response to the client

Source code in wiseagents/agents/coordinator_wise_agents.py
360
361
362
363
364
365
366
def set_response_delivery(self, response_delivery: Callable[[], WiseAgentMessage]):
    """
    Set the function to deliver the response to the client.
    Args:
        response_delivery (Callable[[], WiseAgentMessage]): the function to deliver the response to the
    """
    self._response_delivery = response_delivery

stop()

Do nothing

Source code in wiseagents/agents/coordinator_wise_agents.py
342
343
344
def stop(self):
    """Do nothing"""
    pass

RAGWiseAgent

Bases: WiseAgent

This agent makes use of retrieval augmented generation (RAG) to answer questions.

Source code in wiseagents/agents/rag_wise_agents.py
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
class RAGWiseAgent(WiseAgent):
    """
    This agent makes use of retrieval augmented generation (RAG) to answer questions.
    """
    yaml_tag = u'!wiseagents.agents.RAGWiseAgent'

    def __new__(cls, *args, **kwargs):
        """Create a new instance of the class, setting default values for the optional instance variables."""
        obj = super().__new__(cls)
        obj._collection_name = DEFAULT_COLLECTION_NAME
        obj._k = DEFAULT_NUM_DOCUMENTS
        obj._include_sources = DEFAULT_INCLUDE_SOURCES
        return obj

    def __init__(self, name: str, metadata: WiseAgentMetaData, llm: WiseAgentLLM, vector_db: WiseAgentVectorDB,
                 transport: WiseAgentTransport, collection_name: Optional[str] = DEFAULT_COLLECTION_NAME,
                 k: Optional[int] = DEFAULT_NUM_DOCUMENTS, include_sources: Optional[bool] = DEFAULT_INCLUDE_SOURCES):
        """
        Initialize the agent.

        Args:
            name (str): the name of the agent
            metadata (WiseAgentMetaData): the metadata for the agent
            llm (WiseAgentLLM): the LLM to use for processing requests
            vector_db (WiseAgentVectorDB): the vector database to use for retrieving documents
            transport (WiseAgentTransport): the transport to use for communication
            collection_name Optional(str): the name of the collection within the vector database to use for
            retrieving documents, defaults to wise-agent-collection
            k Optional(int): the number of documents to retrieve for each query, defaults to 4
            include_sources Optional(bool): whether to include the sources of the documents that were consulted to
            produce the response, defaults to False
        """
        self._k = k
        self._include_sources = include_sources
        super().__init__(name=name, metadata=metadata, transport=transport, llm=llm,
                         vector_db=vector_db, collection_name=collection_name)

    def __repr__(self):
        """Return a string representation of the agent."""
        return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm},"
                f"vector_db={self.vector_db}, collection_name={self.collection_name}, transport={self.transport},"
                f"k={self.k}, include_sources={self.include_sources}))")

    def process_event(self, event):
        """Do nothing"""
        return True

    def process_error(self, error):
        """Log the error and return True."""
        logging.error(error)
        return True

    def process_request(self, request: WiseAgentMessage, conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
        """
        Process a request message using retrieval augmented generation (RAG).

        Args:
            request (WiseAgentMessage): the request message to process
            conversation_history (List[ChatCompletionMessageParam]): The conversation history that
            can be used while processing the request. If this agent isn't involved in a type of
            collaboration that makes use of the conversation history, this will be an empty list.

        Returns:
            Optional[str]: the response to the request message as a string or None if there is
            no string response yet
        """
        logging.getLogger(self.name).info(f"Received a message from {request.sender}. Starting to process it using RAG")
        retrieved_documents = retrieve_documents_for_rag(request.message, self.vector_db, self.collection_name, self.k)
        llm_response_with_sources = create_and_process_rag_prompt(retrieved_documents, request.message, self.llm,
                                                                  self.include_sources, conversation_history,
                                                                  self.metadata.system_message)
        return llm_response_with_sources

    def process_response(self, response: WiseAgentMessage):
        """Do nothing"""
        return True

    def stop(self):
        """Do nothing"""
        pass

    @property
    def name(self) -> str:
        """Get the name of the agent."""
        return self._name

    @property
    def k(self) -> int:
        """Get the number of documents to retrieve for each query."""
        return self._k

    @property
    def include_sources(self) -> bool:
        """Get whether to include the sources of the documents that were consulted to produce the response."""
        return self._include_sources

include_sources: bool property

Get whether to include the sources of the documents that were consulted to produce the response.

k: int property

Get the number of documents to retrieve for each query.

name: str property

Get the name of the agent.

__init__(name, metadata, llm, vector_db, transport, collection_name=DEFAULT_COLLECTION_NAME, k=DEFAULT_NUM_DOCUMENTS, include_sources=DEFAULT_INCLUDE_SOURCES)

Initialize the agent.

Parameters:
  • name (str) –

    the name of the agent

  • metadata (WiseAgentMetaData) –

    the metadata for the agent

  • llm (WiseAgentLLM) –

    the LLM to use for processing requests

  • vector_db (WiseAgentVectorDB) –

    the vector database to use for retrieving documents

  • transport (WiseAgentTransport) –

    the transport to use for communication

  • collection_name (Optional(str, default: DEFAULT_COLLECTION_NAME ) –

    the name of the collection within the vector database to use for retrieving documents, defaults to wise-agent-collection

  • k (Optional(int, default: DEFAULT_NUM_DOCUMENTS ) –

    the number of documents to retrieve for each query, defaults to 4

  • include_sources (Optional(bool, default: DEFAULT_INCLUDE_SOURCES ) –

    whether to include the sources of the documents that were consulted to produce the response, defaults to False

Source code in wiseagents/agents/rag_wise_agents.py
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
def __init__(self, name: str, metadata: WiseAgentMetaData, llm: WiseAgentLLM, vector_db: WiseAgentVectorDB,
             transport: WiseAgentTransport, collection_name: Optional[str] = DEFAULT_COLLECTION_NAME,
             k: Optional[int] = DEFAULT_NUM_DOCUMENTS, include_sources: Optional[bool] = DEFAULT_INCLUDE_SOURCES):
    """
    Initialize the agent.

    Args:
        name (str): the name of the agent
        metadata (WiseAgentMetaData): the metadata for the agent
        llm (WiseAgentLLM): the LLM to use for processing requests
        vector_db (WiseAgentVectorDB): the vector database to use for retrieving documents
        transport (WiseAgentTransport): the transport to use for communication
        collection_name (Optional[str]): the name of the collection within the vector database to use
            for retrieving documents, defaults to wise-agent-collection
        k (Optional[int]): the number of documents to retrieve for each query, defaults to 4
        include_sources (Optional[bool]): whether to include the sources of the documents that were
            consulted to produce the response, defaults to False
    """
    # Store the RAG-specific settings before delegating the common agent setup to the base class.
    self._k = k
    self._include_sources = include_sources
    super().__init__(name=name, metadata=metadata, transport=transport, llm=llm,
                     vector_db=vector_db, collection_name=collection_name)

__new__(*args, **kwargs)

Create a new instance of the class, setting default values for the optional instance variables.

Source code in wiseagents/agents/rag_wise_agents.py
33
34
35
36
37
38
39
def __new__(cls, *args, **kwargs):
    """Create a new instance of the class, setting default values for the optional instance variables.

    Defaults are assigned here rather than in __init__ so that construction paths which
    bypass __init__ (presumably YAML deserialization, given the class's yaml_tag — confirm)
    still see sensible values.
    """
    obj = super().__new__(cls)
    obj._collection_name = DEFAULT_COLLECTION_NAME
    obj._k = DEFAULT_NUM_DOCUMENTS
    obj._include_sources = DEFAULT_INCLUDE_SOURCES
    return obj

__repr__()

Return a string representation of the agent.

Source code in wiseagents/agents/rag_wise_agents.py
64
65
66
67
68
def __repr__(self):
    """Return a string representation of the agent listing its full configuration for debugging."""
    # Fixed: the original string ended with "))" — one stray extra closing paren — and ran
    # comma-joined fields together with no space across the f-string segment boundaries.
    return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm}, "
            f"vector_db={self.vector_db}, collection_name={self.collection_name}, transport={self.transport}, "
            f"k={self.k}, include_sources={self.include_sources})")

process_error(error)

Log the error and return True.

Source code in wiseagents/agents/rag_wise_agents.py
74
75
76
77
def process_error(self, error):
    """Log the given error at ERROR level and return True to signal it was handled."""
    logging.error(error)
    return True

process_event(event)

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
70
71
72
def process_event(self, event):
    """Intentionally a no-op; this agent does not react to events. Always returns True."""
    return True

process_request(request, conversation_history)

Process a request message using retrieval augmented generation (RAG).

Parameters:
  • request (WiseAgentMessage) –

    the request message to process

  • conversation_history (List[ChatCompletionMessageParam]) –

    the conversation history that can be used while processing the request

Returns:
  • Optional[str] –

    the response to the request message as a string, or None if there is
    no string response yet

Source code in wiseagents/agents/rag_wise_agents.py
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
def process_request(self, request: WiseAgentMessage, conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
    """
    Answer a request message using retrieval augmented generation (RAG).

    Args:
        request (WiseAgentMessage): the request message to process
        conversation_history (List[ChatCompletionMessageParam]): prior chat messages that may be
            consulted while answering; an empty list when this agent is not part of a
            collaboration type that keeps history

    Returns:
        Optional[str]: the response to the request message as a string, or None if there is
        no string response yet
    """
    logging.getLogger(self.name).info(f"Received a message from {request.sender}. Starting to process it using RAG")
    # First fetch the most relevant documents, then ground the LLM prompt in them.
    documents = retrieve_documents_for_rag(request.message, self.vector_db, self.collection_name, self.k)
    return create_and_process_rag_prompt(documents, request.message, self.llm,
                                         self.include_sources, conversation_history,
                                         self.metadata.system_message)

process_response(response)

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
100
101
102
def process_response(self, response: WiseAgentMessage):
    """Intentionally a no-op; responses are not acted upon. Always returns True."""
    return True

stop()

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
104
105
106
def stop(self):
    """Intentionally a no-op; this agent holds no resources requiring shutdown."""
    pass

SequentialCoordinatorWiseAgent

Bases: WiseAgent

This agent will coordinate the execution of a sequence of agents. Use Stomp protocol.

Source code in wiseagents/agents/coordinator_wise_agents.py
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
class SequentialCoordinatorWiseAgent(WiseAgent):
    """
    This agent will coordinate the execution of a sequence of agents.
    Use Stomp protocol.
    """
    yaml_tag = u'!wiseagents.agents.SequentialCoordinatorWiseAgent'

    # Class-level default so the response_delivery property is safe to read before
    # set_response_delivery has been called (consistent with AssistantAgent).
    _response_delivery = None

    def __init__(self, name: str, metadata: WiseAgentMetaData, transport: WiseAgentTransport, agents: List[str]):
        """
        Initialize the agent.

        Args:
            name (str): the name of the agent
            metadata (WiseAgentMetaData): the metadata for the agent
            transport (WiseAgentTransport): the transport to use for communication
            agents (List[str]): the ordered list of agent names to coordinate, first to last
        """
        self._name = name
        self._agents = agents
        # No LLM: this coordinator only routes messages between other agents.
        super().__init__(name=name, metadata=metadata, transport=transport, llm=None)

    def __repr__(self):
        """Return a string representation of the agent."""
        return f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, agents={self.agents})"

    def handle_request(self, request):
        """
        Process a request message by passing it to the first agent in the sequence.

        Args:
            request (WiseAgentMessage): the request message to process
        """
        logging.debug(f"Sequential coordinator received request: {request}")

        # Generate a chat ID that will be used to collaborate on this query
        sub_ctx_name = f'{self.name}.{str(uuid.uuid4())}'

        ctx = WiseAgentRegistry.create_sub_context(request.context_name, sub_ctx_name)
        ctx.set_collaboration_type(WiseAgentCollaborationType.SEQUENTIAL)
        ctx.set_agents_sequence(self._agents)
        # Remember who should receive the final answer once the sequence completes.
        ctx.set_route_response_to(request.sender)
        self.send_request(WiseAgentMessage(message=request.message, sender=self.name, context_name=ctx.name), self._agents[0])

    def process_response(self, response):
        """
        Validate a response received directly by the coordinator.

        NOTE(review): routing to the next agent appears to be driven by the
        SEQUENTIAL collaboration context set up in handle_request, so no message
        payload is expected here — confirm against the context implementation.

        Args:
            response (WiseAgentMessage): the response message to validate

        Raises:
            ValueError: if the response unexpectedly carries a message payload
        """
        if response.message:
            raise ValueError(f"Unexpected response message: {response.message}")
        return True

    def process_event(self, event):
        """Intentionally a no-op; events are ignored. Always returns True."""
        return True

    def process_error(self, error):
        """Log the error at ERROR level and return True to signal it was handled."""
        logging.error(error)
        return True

    def stop(self):
        """Intentionally a no-op; no resources require explicit shutdown."""
        pass

    @property
    def name(self) -> str:
        """Get the name of the agent."""
        return self._name

    @property
    def agents(self) -> List[str]:
        """Get the ordered list of agent names this coordinator drives."""
        return self._agents

    @property
    def response_delivery(self) -> Optional[Callable[[], WiseAgentMessage]]:
        """
        Get the function to deliver the response to the client.

        Returns:
            (Callable[[], WiseAgentMessage]): the function to deliver the response to the client,
            or None if set_response_delivery has not been called yet
        """
        return self._response_delivery

    def set_response_delivery(self, response_delivery: Callable[[], WiseAgentMessage]):
        """Set the function to deliver the response to the client."""
        self._response_delivery = response_delivery

agents: List[str] property

Get the list of agents.

name: str property

Get the name of the agent.

response_delivery: Optional[Callable[[], WiseAgentMessage]] property

Get the function to deliver the response to the client.

Returns:
  • Callable[[], WiseAgentMessage]

    the function to deliver the response to the client

__init__(name, metadata, transport, agents)

Initialize the agent.

Parameters:
  • name (str) –

    the name of the agent

  • metadata (WiseAgentMetaData) –

    the metadata for the agent

  • transport (WiseAgentTransport) –

    the transport to use for communication

  • agents (List[str]) –

    the list of agents to coordinate

Source code in wiseagents/agents/coordinator_wise_agents.py
20
21
22
23
24
25
26
27
28
29
30
31
32
def __init__(self, name: str, metadata: WiseAgentMetaData, transport: WiseAgentTransport, agents: List[str]):
    """
    Initialize the agent.

    Args:
        name (str): the name of the agent
        metadata (WiseAgentMetaData): the metadata for the agent
        transport (WiseAgentTransport): the transport to use for communication
        agents (List[str]): the ordered list of agent names to coordinate, first to last
    """
    self._name = name
    self._agents = agents
    # No LLM: this coordinator only routes messages between other agents.
    super().__init__(name=name, metadata=metadata, transport=transport, llm=None)

__repr__()

Return a string representation of the agent.

Source code in wiseagents/agents/coordinator_wise_agents.py
34
35
36
def __repr__(self):
    """Return a string representation of the agent showing its name, metadata, and agent sequence."""
    return f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, agents={self.agents})"

handle_request(request)

Process a request message by passing it to the first agent in the sequence.

Parameters:
  • request (WiseAgentMessage) –

    the request message to process

Source code in wiseagents/agents/coordinator_wise_agents.py
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
def handle_request(self, request):
    """
    Kick off the coordinated sequence by forwarding the request to its first agent.

    Args:
        request (WiseAgentMessage): the request message to process
    """
    logging.debug(f"Sequential coordinator received request: {request}")

    # Generate a chat ID that will be used to collaborate on this query
    child_ctx_name = f'{self.name}.{str(uuid.uuid4())}'

    collaboration_ctx = WiseAgentRegistry.create_sub_context(request.context_name, child_ctx_name)
    collaboration_ctx.set_collaboration_type(WiseAgentCollaborationType.SEQUENTIAL)
    collaboration_ctx.set_agents_sequence(self._agents)
    collaboration_ctx.set_route_response_to(request.sender)
    outgoing = WiseAgentMessage(message=request.message, sender=self.name, context_name=collaboration_ctx.name)
    self.send_request(outgoing, self._agents[0])

process_error(error)

Log the error and return True.

Source code in wiseagents/agents/coordinator_wise_agents.py
71
72
73
74
def process_error(self, error):
    """Log the given error at ERROR level and return True to signal it was handled."""
    logging.error(error)
    return True

process_event(event)

Do nothing

Source code in wiseagents/agents/coordinator_wise_agents.py
67
68
69
def process_event(self, event):
    """Intentionally a no-op; events are ignored. Always returns True."""
    return True

process_response(response)

Process a response message by passing it to the next agent in the sequence.

Parameters:
  • response (WiseAgentMessage) –

    the response message to process

Source code in wiseagents/agents/coordinator_wise_agents.py
56
57
58
59
60
61
62
63
64
65
def process_response(self, response):
    """
    Validate a response received directly by the coordinator.

    NOTE(review): routing to the next agent in the sequence appears to be driven
    by the SEQUENTIAL collaboration context set up in handle_request, so no
    message payload is expected here — confirm against the context implementation.

    Args:
        response (WiseAgentMessage): the response message to validate

    Raises:
        ValueError: if the response unexpectedly carries a message payload
    """
    if response.message:
        raise ValueError(f"Unexpected response message: {response.message}")
    return True

set_response_delivery(response_delivery)

Set the function to deliver the response to the client.

Source code in wiseagents/agents/coordinator_wise_agents.py
100
101
102
def set_response_delivery(self, response_delivery: Callable[[], WiseAgentMessage]):
    """Set the callback used to deliver the final response to the client."""
    self._response_delivery = response_delivery

stop()

Do nothing

Source code in wiseagents/agents/coordinator_wise_agents.py
76
77
78
def stop(self):
    """Intentionally a no-op; no resources require explicit shutdown."""
    pass

SequentialMemoryCoordinatorWiseAgent

Bases: WiseAgent

This agent coordinates the execution of a sequence of agents while maintaining a shared chat memory in the collaboration context.

Source code in wiseagents/agents/coordinator_wise_agents.py
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
class SequentialMemoryCoordinatorWiseAgent(WiseAgent):
    """
    Coordinate the execution of a sequence of agents while keeping a shared chat
    memory in the collaboration context, so agents later in the sequence can see
    the earlier exchanges.
    """
    yaml_tag = u'!wiseagents.agents.SequentialMemoryCoordinatorWiseAgent'

    def __init__(self, name: str, metadata: WiseAgentMetaData, transport: WiseAgentTransport, agents: List[str]):
        """
        Initialize the agent.

        Args:
            name (str): the name of the agent
            metadata (WiseAgentMetaData): the metadata for the agent
            transport (WiseAgentTransport): the transport to use for communication
            agents (List[str]): the ordered list of agent names to coordinate, first to last
        """
        self._name = name
        self._agents = agents
        # No LLM: this coordinator only routes messages between other agents.
        super().__init__(name=name, metadata=metadata, transport=transport, llm=None)

    def __repr__(self):
        """Return a string representation of the agent."""
        return f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, agents={self.agents})"


    def handle_request(self, request):
        """
        Process a request message by passing it to the first agent in the sequence.

        Args:
            request (WiseAgentMessage): the request message to process
        """
        # NOTE(review): consider logging instead of print, for consistency with
        # SequentialCoordinatorWiseAgent which uses logging.debug.
        print(f"[{self.name}] Received a message from {request.sender}. Starting to kick off the sequence of agents")

        # Generate a chat ID that will be used to collaborate on this query
        sub_ctx_name = f'{self.name}.{str(uuid.uuid4())}'

        ctx = WiseAgentRegistry.create_sub_context(request.context_name, sub_ctx_name)
        ctx.set_collaboration_type(WiseAgentCollaborationType.SEQUENTIAL_MEMORY)
        # Seed the shared memory with the configured system message, if any.
        if self.metadata.system_message:
            ctx.append_chat_completion(messages={"role": "system", "content": self.metadata.system_message})

        ctx.set_agents_sequence(self._agents)
        ctx.set_route_response_to(request.sender)
        # Record the query in the shared memory so downstream agents can see it.
        ctx.add_query(request.message)
        self.send_request(WiseAgentMessage(message=request.message, sender=self.name, context_name=ctx.name), self._agents[0])

__init__(name, metadata, transport, agents)

Initialize the agent.

Parameters:
  • name (str) –

    the name of the agent

  • metadata (WiseAgentMetaData) –

    the metadata for the agent

  • transport (WiseAgentTransport) –

    the transport to use for communication

  • agents (List[str]) –

    the list of agents to coordinate

Source code in wiseagents/agents/coordinator_wise_agents.py
107
108
109
110
111
112
113
114
115
116
117
118
119
def __init__(self, name: str, metadata: WiseAgentMetaData, transport: WiseAgentTransport, agents: List[str]):
    """
    Initialize the agent.

    Args:
        name (str): the name of the agent
        metadata (WiseAgentMetaData): the metadata for the agent
        transport (WiseAgentTransport): the transport to use for communication
        agents (List[str]): the ordered list of agent names to coordinate, first to last
    """
    self._name = name
    self._agents = agents
    # No LLM: this coordinator only routes messages between other agents.
    super().__init__(name=name, metadata=metadata, transport=transport, llm=None)

__repr__()

Return a string representation of the agent.

Source code in wiseagents/agents/coordinator_wise_agents.py
121
122
123
def __repr__(self):
    """Return a string representation of the agent showing its name, metadata, and agent sequence."""
    return f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, agents={self.agents})"

handle_request(request)

Process a request message by passing it to the first agent in the sequence.

Parameters:
  • request (WiseAgentMessage) –

    the request message to process

Source code in wiseagents/agents/coordinator_wise_agents.py
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
def handle_request(self, request):
    """
    Process a request message by passing it to the first agent in the sequence.

    Args:
        request (WiseAgentMessage): the request message to process
    """
    # NOTE(review): consider logging instead of print, for consistency with
    # SequentialCoordinatorWiseAgent which uses logging.debug.
    print(f"[{self.name}] Received a message from {request.sender}. Starting to kick off the sequence of agents")

    # Generate a chat ID that will be used to collaborate on this query
    sub_ctx_name = f'{self.name}.{str(uuid.uuid4())}'

    ctx = WiseAgentRegistry.create_sub_context(request.context_name, sub_ctx_name)
    ctx.set_collaboration_type(WiseAgentCollaborationType.SEQUENTIAL_MEMORY)
    # Seed the shared memory with the configured system message, if any.
    if self.metadata.system_message:
        ctx.append_chat_completion(messages={"role": "system", "content": self.metadata.system_message})

    ctx.set_agents_sequence(self._agents)
    ctx.set_route_response_to(request.sender)
    # Record the query in the shared memory so downstream agents can see it.
    ctx.add_query(request.message)
    self.send_request(WiseAgentMessage(message=request.message, sender=self.name, context_name=ctx.name), self._agents[0])