DEFAULT_COLLECTION_NAME = 'wise-agents-collection' module-attribute

The default collection name to use during retrieval augmented generation (RAG).

DEFAULT_INCLUDE_SOURCES = False module-attribute

The default value for whether to include the sources of the documents that were consulted to produce the response when using retrieval augmented generation (RAG).

DEFAULT_NUM_DOCUMENTS = 4 module-attribute

The default number of documents to retrieve during retrieval augmented generation (RAG).

BaseCoVeChallengerWiseAgent

Bases: WiseAgent

This abstract agent implementation is used to challenge the response from a RAG or Graph RAG agent using the Chain-of-Verification (CoVe) method (https://arxiv.org/pdf/2309.11495) to try to prevent hallucinations.

Source code in wiseagents/agents/rag_wise_agents.py
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
class BaseCoVeChallengerWiseAgent(WiseAgent):
    """
    This abstract agent implementation is used to challenge the response from a RAG or Graph RAG agent
    using the Chain-of-Verification (CoVe) method (https://arxiv.org/pdf/2309.11495) to try to prevent
    hallucinations.
    """

    def __new__(cls, *args, **kwargs):
        """Create a new instance of the class, setting default values for the optional instance variables."""
        obj = super().__new__(cls)
        # Reject direct instantiation of this abstract base class.
        enforce_no_abstract_class_instances(cls, BaseCoVeChallengerWiseAgent)
        # NOTE(review): defaults are set in __new__ rather than __init__, presumably so
        # instances created without running __init__ (e.g. during deserialization) still
        # carry these attributes — confirm against the framework's instantiation path.
        obj._k = DEFAULT_NUM_DOCUMENTS
        obj._num_verification_questions = DEFAULT_NUM_VERIFICATION_QUESTIONS
        obj._vector_db = None
        obj._collection_name = DEFAULT_COLLECTION_NAME
        obj._graph_db = None
        return obj

    def __init__(self, name: str, metadata: WiseAgentMetaData, llm: WiseAgentLLM, transport: WiseAgentTransport,
                 k: Optional[int] = DEFAULT_NUM_DOCUMENTS,
                 num_verification_questions: Optional[int] = DEFAULT_NUM_VERIFICATION_QUESTIONS,
                 vector_db: Optional[WiseAgentVectorDB] = None, collection_name: Optional[str] = DEFAULT_COLLECTION_NAME,
                 graph_db: Optional[WiseAgentGraphDB] = None):
        """
        Initialize the agent.

        Args:
            name (str): the name of the agent
            metadata (WiseAgentMetaData): the metadata for the agent
            llm (WiseAgentLLM): the LLM agent to use for processing requests
            transport (WiseAgentTransport): the transport to use for communication
            k (Optional[int]): the number of documents to retrieve from the vector database, defaults to 4
            num_verification_questions (Optional[int]): the number of verification questions to generate, defaults to 4
            vector_db (Optional[WiseAgentVectorDB]): the vector DB associated with the agent (to be used for challenging RAG results)
            collection_name (Optional[str]): the vector DB collection name associated with the agent,
            defaults to "wise-agents-collection"
            graph_db (Optional[WiseAgentGraphDB]): the graph DB associated with the agent (to be used for challenging Graph RAG results)
        """
        self._k = k
        self._num_verification_questions = num_verification_questions
        self._vector_db = vector_db
        llm_agent = llm  # plain alias; the LLM is passed through to the parent unchanged
        super().__init__(name=name, metadata=metadata, transport=transport, llm=llm_agent,
                         vector_db=vector_db, collection_name=collection_name, graph_db=graph_db)

    def __repr__(self):
        """Return a string representation of the agent."""
        return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm},"
                f"k={self.k}, num_verification_questions={self._num_verification_questions},"
                f"transport={self.transport}, vector_db={self.vector_db}, collection_name={self.collection_name},"
                f"graph_db={self.graph_db})")

    def process_event(self, event):
        """Do nothing; events are not handled by this agent."""
        return True

    def process_error(self, error):
        """Log the error and return True."""
        logging.error(error)
        return True

    def process_request(self, request: WiseAgentMessage,
                        conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
        """
        Process a message containing a question and a baseline response to the question
        by challenging the baseline response to generate a revised response to the original question.

        Args:
            request (WiseAgentMessage): the request message to process
            conversation_history (List[ChatCompletionMessageParam]): The conversation history that
            can be used while processing the request. If this agent isn't involved in a type of
            collaboration that makes use of the conversation history, this will be an empty list.

        Returns:
            str: the response to the request message as a string
        """
        return self.create_and_process_chain_of_verification_prompts(request.message, conversation_history)

    def process_response(self, response: WiseAgentMessage):
        """Do nothing; responses are not handled by this agent."""
        return True

    def stop(self):
        """Do nothing; this agent has no resources to release."""
        pass

    @property
    def name(self) -> str:
        """Get the name of the agent."""
        return self._name

    @property
    def k(self) -> int:
        """Get the number of documents to retrieve."""
        return self._k

    @property
    def num_verification_questions(self) -> int:
        """Get the number of verification questions to generate."""
        return self._num_verification_questions

    def create_and_process_chain_of_verification_prompts(self, message: str,
                                                         conversation_history: List[ChatCompletionMessageParam]) -> str:
        """
        Create prompts to challenge the baseline response to a question to try to generate a revised response
        to the original question.

        Implements the three CoVe stages: (1) plan verification questions, (2) answer
        each question independently of the baseline response, (3) produce a revised
        response conditioned on the verification results.

        Note: this method appends prompts (and possibly system messages) to the
        caller's conversation_history list in place.

        Args:
            message (str): the message containing the question and baseline response
            conversation_history (List[ChatCompletionMessageParam]): The conversation history that
            can be used while processing the request. If this agent isn't involved in a type of
            collaboration that makes use of the conversation history, this will be an empty list.
        """

        # plan verifications, taking into account the baseline response and conversation history
        prompt = (f"Given the following question and baseline response, generate a list of {self.num_verification_questions} "
                  f" verification questions that could help determine if there are any mistakes in the baseline response:"
                  f"\n{message}\n"
                  f"Your response should contain only the list of questions, one per line.\n")
        if self.metadata.system_message or self.llm.system_message:
            conversation_history.append({"role": "system", "content": self.metadata.system_message or self.llm.system_message})
        conversation_history.append({"role": "user", "content": prompt})
        llm_response = self.llm.process_chat_completion(conversation_history, [])

        # execute verifications, answering questions independently, without the baseline response
        # (truncate to the requested count in case the LLM returned extra lines)
        verification_questions = llm_response.choices[0].message.content.splitlines()[:self.num_verification_questions]
        verification_responses = ""
        for question in verification_questions:
            retrieved_documents = self.retrieve_documents(question)
            llm_response = create_and_process_rag_prompt(retrieved_documents, question, self.llm, False,
                                          [], self.metadata.system_message)
            verification_responses = (verification_responses + "Verification Question: " + question + "\n"
                                      + "Verification Result: " + llm_response + "\n")

        # generate the final revised response, conditioned on the baseline response and verification results
        complete_info = message + "\n" + verification_responses
        prompt = (f"Given the following question, baseline response, and a list of verification questions and results,"
                  f" generate a revised response incorporating the verification results:\n{complete_info}\n"
                  f"Your response must contain only the revised response to the question in the JSON format shown below:\n"
                  f"{{'revised': 'Your revised response to the question.'}}\n")

        # NOTE(review): a system message may be appended a second time here (it was
        # already appended before the planning prompt) — confirm this duplication is intended.
        if self.metadata.system_message or self.llm.system_message:
            conversation_history.append({"role": "system", "content": self.metadata.system_message or self.llm.system_message})
        conversation_history.append({"role": "user", "content": prompt})
        llm_response = self.llm.process_chat_completion(conversation_history, [])
        return llm_response.choices[0].message.content

    @abstractmethod
    def retrieve_documents(self, question: str) -> List[Document]:
        """
        Retrieve documents to be used as the context for a RAG or Graph RAG prompt.

        Concrete subclasses supply the retrieval strategy (e.g. vector or graph based).

        Args:
            question (str): the question to be used to retrieve the documents

        Returns:
            List[Document]: the list of documents retrieved for the question
        """
        ...

k: int property

Get the number of documents to retrieve.

name: str property

Get the name of the agent.

num_verification_questions: int property

Get the number of verification questions to generate.

__init__(name, metadata, llm, transport, k=DEFAULT_NUM_DOCUMENTS, num_verification_questions=DEFAULT_NUM_VERIFICATION_QUESTIONS, vector_db=None, collection_name=DEFAULT_COLLECTION_NAME, graph_db=None)

Initialize the agent.

Parameters:
  • name (str) –

    the name of the agent

  • metadata (WiseAgentMetaData) –

    the metadata for the agent

  • llm (WiseAgentLLM) –

    the LLM agent to use for processing requests

  • transport (WiseAgentTransport) –

    the transport to use for communication

  • k (Optional[int], default: DEFAULT_NUM_DOCUMENTS) –

    the number of documents to retrieve from the vector database, defaults to 4

  • num_verification_questions (Optional[int], default: DEFAULT_NUM_VERIFICATION_QUESTIONS) –

    the number of verification questions to generate, defaults to 4

  • vector_db (Optional[WiseAgentVectorDB], default: None ) –

    the vector DB associated with the agent (to be used for challenging RAG results)

  • collection_name (Optional[str], default: DEFAULT_COLLECTION_NAME) –

    the vector DB collection name associated with the agent

  • graph_db (Optional[WiseAgentGraphDB], default: None ) –

    the graph DB associated with the agent (to be used for challenging Graph RAG results)

Source code in wiseagents/agents/rag_wise_agents.py
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
def __init__(self, name: str, metadata: WiseAgentMetaData, llm: WiseAgentLLM, transport: WiseAgentTransport,
             k: Optional[int] = DEFAULT_NUM_DOCUMENTS,
             num_verification_questions: Optional[int] = DEFAULT_NUM_VERIFICATION_QUESTIONS,
             vector_db: Optional[WiseAgentVectorDB] = None, collection_name: Optional[str] = DEFAULT_COLLECTION_NAME,
             graph_db: Optional[WiseAgentGraphDB] = None):
    """
    Initialize the agent.

    Args:
        name (str): the name of the agent
        metadata (WiseAgentMetaData): the metadata for the agent
        llm (WiseAgentLLM): the LLM agent to use for processing requests
        transport (WiseAgentTransport): the transport to use for communication
        k (Optional[int]): the number of documents to retrieve from the vector database, defaults to 4
        num_verification_questions (Optional[int]): the number of verification questions to generate, defaults to 4
        vector_db (Optional[WiseAgentVectorDB]): the vector DB associated with the agent (to be used for challenging RAG results)
        collection_name (Optional[str]): the vector DB collection name associated with the agent,
        defaults to "wise-agents-collection"
        graph_db (Optional[WiseAgentGraphDB]): the graph DB associated with the agent (to be used for challenging Graph RAG results)
    """
    self._k = k
    self._num_verification_questions = num_verification_questions
    self._vector_db = vector_db
    llm_agent = llm  # plain alias; the LLM is passed through to the parent unchanged
    super().__init__(name=name, metadata=metadata, transport=transport, llm=llm_agent,
                     vector_db=vector_db, collection_name=collection_name, graph_db=graph_db)

__new__(*args, **kwargs)

Create a new instance of the class, setting default values for the optional instance variables.

Source code in wiseagents/agents/rag_wise_agents.py
253
254
255
256
257
258
259
260
261
262
def __new__(cls, *args, **kwargs):
    """Allocate the instance, reject abstract instantiation, and seed default attribute values."""
    instance = super().__new__(cls)
    enforce_no_abstract_class_instances(cls, BaseCoVeChallengerWiseAgent)
    # Seed the optional instance variables so they exist even when __init__ is bypassed.
    instance._k = DEFAULT_NUM_DOCUMENTS
    instance._num_verification_questions = DEFAULT_NUM_VERIFICATION_QUESTIONS
    instance._collection_name = DEFAULT_COLLECTION_NAME
    instance._vector_db = None
    instance._graph_db = None
    return instance

__repr__()

Return a string representation of the agent.

Source code in wiseagents/agents/rag_wise_agents.py
290
291
292
293
294
295
def __repr__(self):
    """Return a developer-oriented string describing this agent's configuration."""
    details = (
        f"name={self.name}, metadata={self.metadata}, llm={self.llm},"
        f"k={self.k}, num_verification_questions={self._num_verification_questions},"
        f"transport={self.transport}, vector_db={self.vector_db}, collection_name={self.collection_name},"
        f"graph_db={self.graph_db}"
    )
    return f"{self.__class__.__name__}({details})"

create_and_process_chain_of_verification_prompts(message, conversation_history)

Create prompts to challenge the baseline response to a question to try to generate a revised response to the original question.

Parameters:
  • message (str) –

    the message containing the question and baseline response

  • conversation_history (List[ChatCompletionMessageParam]) –

    The conversation history that can be used while processing the request; this is an empty list if the agent is not involved in a collaboration that uses the conversation history.

Source code in wiseagents/agents/rag_wise_agents.py
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
def create_and_process_chain_of_verification_prompts(self, message: str,
                                                     conversation_history: List[ChatCompletionMessageParam]) -> str:
    """
    Create prompts to challenge the baseline response to a question to try to generate a revised response
    to the original question.

    Implements the three CoVe stages: (1) plan verification questions, (2) answer
    each question independently of the baseline response, (3) produce a revised
    response conditioned on the verification results.

    Note: this method appends prompts (and possibly system messages) to the
    caller's conversation_history list in place.

    Args:
        message (str): the message containing the question and baseline response
        conversation_history (List[ChatCompletionMessageParam]): The conversation history that
        can be used while processing the request. If this agent isn't involved in a type of
        collaboration that makes use of the conversation history, this will be an empty list.
    """

    # plan verifications, taking into account the baseline response and conversation history
    prompt = (f"Given the following question and baseline response, generate a list of {self.num_verification_questions} "
              f" verification questions that could help determine if there are any mistakes in the baseline response:"
              f"\n{message}\n"
              f"Your response should contain only the list of questions, one per line.\n")
    if self.metadata.system_message or self.llm.system_message:
        conversation_history.append({"role": "system", "content": self.metadata.system_message or self.llm.system_message})
    conversation_history.append({"role": "user", "content": prompt})
    llm_response = self.llm.process_chat_completion(conversation_history, [])

    # execute verifications, answering questions independently, without the baseline response
    # (truncate to the requested count in case the LLM returned extra lines)
    verification_questions = llm_response.choices[0].message.content.splitlines()[:self.num_verification_questions]
    verification_responses = ""
    for question in verification_questions:
        retrieved_documents = self.retrieve_documents(question)
        llm_response = create_and_process_rag_prompt(retrieved_documents, question, self.llm, False,
                                      [], self.metadata.system_message)
        verification_responses = (verification_responses + "Verification Question: " + question + "\n"
                                  + "Verification Result: " + llm_response + "\n")

    # generate the final revised response, conditioned on the baseline response and verification results
    complete_info = message + "\n" + verification_responses
    prompt = (f"Given the following question, baseline response, and a list of verification questions and results,"
              f" generate a revised response incorporating the verification results:\n{complete_info}\n"
              f"Your response must contain only the revised response to the question in the JSON format shown below:\n"
              f"{{'revised': 'Your revised response to the question.'}}\n")

    # NOTE(review): a system message may be appended a second time here (it was
    # already appended before the planning prompt) — confirm this duplication is intended.
    if self.metadata.system_message or self.llm.system_message:
        conversation_history.append({"role": "system", "content": self.metadata.system_message or self.llm.system_message})
    conversation_history.append({"role": "user", "content": prompt})
    llm_response = self.llm.process_chat_completion(conversation_history, [])
    return llm_response.choices[0].message.content

process_error(error)

Log the error and return True.

Source code in wiseagents/agents/rag_wise_agents.py
301
302
303
304
def process_error(self, error):
    """Record the given error in the log and report that it was handled."""
    logging.error(error)
    return True

process_event(event)

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
297
298
299
def process_event(self, event):
    """Ignore the event; always report success."""
    return True

process_request(request, conversation_history)

Process a message containing a question and a baseline response to the question by challenging the baseline response to generate a revised response to the original question.

Parameters:
  • request (WiseAgentMessage) –

    the request message to process

  • conversation_history (List[ChatCompletionMessageParam]) –

    The conversation history that can be used while processing the request; this is an empty list if the agent is not involved in a collaboration that uses the conversation history.

Returns:
  • str( Optional[str] ) –

    the response to the request message as a string

Source code in wiseagents/agents/rag_wise_agents.py
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
def process_request(self, request: WiseAgentMessage,
                    conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
    """
    Challenge the question/baseline-response pair carried by the request message and
    produce a revised answer to the original question using the CoVe prompt chain.

    Args:
        request (WiseAgentMessage): the request message to process
        conversation_history (List[ChatCompletionMessageParam]): the conversation history
        available while processing the request; empty when this agent is not taking part
        in a collaboration that tracks history

    Returns:
        str: the revised response to the request message as a string
    """
    revised_response = self.create_and_process_chain_of_verification_prompts(request.message, conversation_history)
    return revised_response

process_response(response)

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
323
324
325
def process_response(self, response: WiseAgentMessage):
    """Ignore the response; always report success."""
    return True

retrieve_documents(question) abstractmethod

Retrieve documents to be used as the context for a RAG or Graph RAG prompt.

Parameters:
  • question (str) –

    the question to be used to retrieve the documents

Returns:
  • List[Document]

    List[Document]: the list of documents retrieved for the question

Source code in wiseagents/agents/rag_wise_agents.py
392
393
394
395
396
397
398
399
400
401
402
403
@abstractmethod
def retrieve_documents(self, question: str) -> List[Document]:
    """
    Retrieve documents to be used as the context for a RAG or Graph RAG prompt.

    Concrete subclasses supply the retrieval strategy (e.g. vector or graph based).

    Args:
        question (str): the question to be used to retrieve the documents

    Returns:
        List[Document]: the list of documents retrieved for the question
    """
    ...

stop()

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
327
328
329
def stop(self):
    """Perform shutdown; this agent holds no resources, so there is nothing to do."""

CoVeChallengerGraphRAGWiseAgent

Bases: BaseCoVeChallengerWiseAgent

This agent implementation is used to challenge the response from a Graph RAG agent using the Chain-of-Verification (CoVe) method (https://arxiv.org/pdf/2309.11495) to try to prevent hallucinations.

Source code in wiseagents/agents/rag_wise_agents.py
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
class CoVeChallengerGraphRAGWiseAgent(BaseCoVeChallengerWiseAgent):
    """
    This agent implementation is used to challenge the response from a Graph RAG agent using the
    Chain-of-Verification (CoVe) method (https://arxiv.org/pdf/2309.11495) to try to prevent
    hallucinations.
    """
    # Tag used by the YAML (de)serialization machinery to identify this class.
    yaml_tag = u'!wiseagents.agents.CoVeChallengerGraphRAGWiseAgent'

    def __new__(cls, *args, **kwargs):
        """Create a new instance of the class, setting default values for the optional instance variables."""
        obj = super().__new__(cls)
        # Seed the optional instance variables so they exist even when __init__ is bypassed
        # (e.g. during YAML deserialization).
        obj._collection_name = DEFAULT_COLLECTION_NAME
        obj._k = DEFAULT_NUM_DOCUMENTS
        obj._num_verification_questions = DEFAULT_NUM_VERIFICATION_QUESTIONS
        obj._retrieval_query = ""
        obj._params = None
        obj._metadata_filter = None
        return obj

    def __init__(self, name: str, metadata: WiseAgentMetaData, llm: WiseAgentLLM, graph_db: WiseAgentGraphDB,
                 transport: WiseAgentTransport, k: Optional[int] = DEFAULT_NUM_DOCUMENTS,
                 num_verification_questions: Optional[int] = DEFAULT_NUM_VERIFICATION_QUESTIONS,
                 retrieval_query: Optional[str] = "", params: Optional[Dict[str, Any]] = None,
                 metadata_filter: Optional[Dict[str, Any]] = None):
        """
        Initialize the agent.

        Args:
            name (str): the name of the agent
            metadata (WiseAgentMetaData): the metadata for the agent
            llm (WiseAgentLLM): the LLM agent to use for processing requests
            graph_db (WiseAgentGraphDB): the graph database to use for retrieving documents
            transport (WiseAgentTransport): the transport to use for communication
            k (Optional[int]): the number of documents to retrieve from the vector database, defaults to 4
            num_verification_questions (Optional[int]): the number of verification questions to generate, defaults to 4
            retrieval_query (Optional[str]): the optional retrieval query to use to obtain sub-graphs connected to nodes
            retrieved from a similarity search
            params (Optional[Dict[str, Any]]): the optional parameters for the query
            metadata_filter (Optional[Dict[str, Any]]): the optional metadata filter to use with similarity search
        """
        self._k = k
        self._num_verification_questions = num_verification_questions
        self._retrieval_query = retrieval_query
        self._params = params
        self._metadata_filter = metadata_filter
        super().__init__(name=name, metadata=metadata, transport=transport, llm=llm,
                         graph_db=graph_db, k=k,
                         num_verification_questions=num_verification_questions)

    def __repr__(self):
        """Return a string representation of the agent."""
        # Fixed: the original was missing the comma separators after
        # num_verification_questions and params, producing a malformed repr.
        return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm},"
                f"graph_db={self.graph_db}, k={self.k}, num_verification_questions={self._num_verification_questions},"
                f"transport={self.transport}, retrieval_query={self.retrieval_query}, params={self.params},"
                f"metadata_filter={self.metadata_filter})")

    def process_event(self, event):
        """Do nothing; events are not handled by this agent."""
        return True

    def process_error(self, error):
        """Log the error and return True."""
        logging.error(error)
        return True

    def process_request(self, request: WiseAgentMessage, conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
        """
        Process a message containing a question and a baseline response to the question
        by challenging the baseline response to generate a revised response to the original question.

        Args:
            request (WiseAgentMessage): the request message to process
            conversation_history (List[ChatCompletionMessageParam]): The conversation history that
            can be used while processing the request. If this agent isn't involved in a type of
            collaboration that makes use of the conversation history, this will be an empty list.

        Returns:
            str: the response to the request message as a string
        """
        logging.getLogger(self.name).info(f"Received a message from {request.sender}. Starting to challenge it")
        llm_response = self.create_and_process_chain_of_verification_prompts(request.message, conversation_history)
        return llm_response

    def process_response(self, response: WiseAgentMessage):
        """Do nothing; responses are not handled by this agent."""
        return True

    def stop(self):
        """Do nothing; this agent has no resources to release."""
        pass

    @property
    def retrieval_query(self) -> str:
        """Get the Cypher query to use to obtain sub-graphs connected to nodes retrieved from a similarity search."""
        return self._retrieval_query

    @property
    def params(self) -> Optional[Dict[str, Any]]:
        """Get the optional parameters for the query."""
        return self._params

    @property
    def metadata_filter(self) -> Optional[Dict[str, Any]]:
        """Get the optional metadata filter to use with similarity search."""
        return self._metadata_filter

    def retrieve_documents(self, question: str) -> List[Document]:
        """Retrieve documents for the question via Graph RAG using the agent's graph DB and query settings."""
        return retrieve_documents_for_graph_rag(question, self.graph_db, self.k,
                                                self.retrieval_query, self.params, self.metadata_filter)

metadata_filter: Optional[Dict[str, Any]] property

Get the optional metadata filter to use with similarity search.

params: Optional[Dict[str, Any]] property

Get the optional parameters for the query.

retrieval_query: str property

Get the Cypher query to use to obtain sub-graphs connected to nodes retrieved from a similarity search.

__init__(name, metadata, llm, graph_db, transport, k=DEFAULT_NUM_DOCUMENTS, num_verification_questions=DEFAULT_NUM_VERIFICATION_QUESTIONS, retrieval_query='', params=None, metadata_filter=None)

Initialize the agent.

Parameters:
  • name (str) –

    the name of the agent

  • metadata (WiseAgentMetaData) –

    the metadata for the agent

  • llm (WiseAgentLLM) –

    the LLM agent to use for processing requests

  • graph_db (Optional[WiseAgentGraphDB]) –

    the graph database to use for retrieving documents

  • transport (WiseAgentTransport) –

    the transport to use for communication

  • k (Optional[int], default: DEFAULT_NUM_DOCUMENTS ) –

    the number of documents to retrieve from the vector database, defaults to 4

  • num_verification_questions (Optional[int], default: DEFAULT_NUM_VERIFICATION_QUESTIONS ) –

    the number of verification questions to generate, defaults to 4

  • retrieval_query (Optional[str], default: '' ) –

    the optional retrieval query to use to obtain sub-graphs connected to nodes

  • params (Optional[Dict[str, Any]], default: None ) –

    the optional parameters for the query

  • metadata_filter (Optional[Dict[str, Any]], default: None ) –

    the optional metadata filter to use with similarity search

Source code in wiseagents/agents/rag_wise_agents.py
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
def __init__(self, name: str, metadata: WiseAgentMetaData, llm: WiseAgentLLM, graph_db: WiseAgentGraphDB,
             transport: WiseAgentTransport, k: Optional[int] = DEFAULT_NUM_DOCUMENTS,
             num_verification_questions: Optional[int] = DEFAULT_NUM_VERIFICATION_QUESTIONS,
             retrieval_query: Optional[str] = "", params: Optional[Dict[str, Any]] = None,
             metadata_filter: Optional[Dict[str, Any]] = None):
    """
    Initialize the agent.

    Args:
        name (str): the name of the agent
        metadata (WiseAgentMetaData): the metadata for the agent
        llm (WiseAgentLLM): the LLM agent to use for processing requests
        graph_db (WiseAgentGraphDB): the graph database to use for retrieving documents
        transport (WiseAgentTransport): the transport to use for communication
        k (Optional[int]): the number of documents to retrieve from the vector database, defaults to 4
        num_verification_questions (Optional[int]): the number of verification questions to generate, defaults to 4
        retrieval_query (Optional[str]): the optional retrieval query to use to obtain sub-graphs connected to nodes
        retrieved from a similarity search
        params (Optional[Dict[str, Any]]): the optional parameters for the query
        metadata_filter (Optional[Dict[str, Any]]): the optional metadata filter to use with similarity search
    """
    self._k = k
    self._num_verification_questions = num_verification_questions
    self._retrieval_query = retrieval_query
    self._params = params
    self._metadata_filter = metadata_filter
    super().__init__(name=name, metadata=metadata, transport=transport, llm=llm,
                     graph_db=graph_db, k=k,
                     num_verification_questions=num_verification_questions)

__new__(*args, **kwargs)

Create a new instance of the class, setting default values for the optional instance variables.

Source code in wiseagents/agents/rag_wise_agents.py
498
499
500
501
502
503
504
505
506
507
def __new__(cls, *args, **kwargs):
    """Allocate the instance and seed the optional attributes with their default values."""
    instance = super().__new__(cls)
    # Seed the optional instance variables so they exist even when __init__ is bypassed.
    instance._k = DEFAULT_NUM_DOCUMENTS
    instance._num_verification_questions = DEFAULT_NUM_VERIFICATION_QUESTIONS
    instance._collection_name = DEFAULT_COLLECTION_NAME
    instance._retrieval_query = ""
    instance._params = None
    instance._metadata_filter = None
    return instance

__repr__()

Return a string representation of the agent.

Source code in wiseagents/agents/rag_wise_agents.py
539
540
541
542
543
544
def __repr__(self):
    """Return a string representation of the agent."""
    return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm},"
            f"graph_db={self.graph_db}, k={self.k},num_verification_questions={self._num_verification_questions}"
            f"transport={self.transport}, retrieval_query={self.retrieval_query}, params={self.params}"
            f"metadata_filter={self.metadata_filter})")

process_error(error)

Log the error and return True.

Source code in wiseagents/agents/rag_wise_agents.py
550
551
552
553
def process_error(self, error):
    """Log the error and return True."""
    logging.error(error)
    return True

process_event(event)

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
546
547
548
def process_event(self, event):
    """Do nothing"""
    return True

process_request(request, conversation_history)

Process a message containing a question and a baseline response to the question by challenging the baseline response to generate a revised response to the original question.

Parameters:
  • request (WiseAgentMessage) –

    the request message to process

  • conversation_history (List[ChatCompletionMessageParam]) –

    the conversation history that can be used while processing the request;
    this will be an empty list if this agent isn't involved in a type of
    collaboration that makes use of the conversation history

Returns:
  • str (Optional[str]) –

    the response to the request message as a string

Source code in wiseagents/agents/rag_wise_agents.py
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
def process_request(self, request: WiseAgentMessage, conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
    """
    Process a message containing a question and a baseline response to the question
    by challenging the baseline response to generate a revised response to the original question.

    Args:
        request (WiseAgentMessage): the request message to process
        conversation_history (List[ChatCompletionMessageParam]): The conversation history that
        can be used while processing the request. If this agent isn't involved in a type of
        collaboration that makes use of the conversation history, this will be an empty list.

    Returns:
        str: the response to the request message as a string
    """
    logging.getLogger(self.name).info(f"Received a message from {request.sender}. Starting to challenge it")
    llm_response = self.create_and_process_chain_of_verification_prompts(request.message, conversation_history)
    return llm_response

process_response(response)

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
573
574
575
def process_response(self, response: WiseAgentMessage):
    """Do nothing"""
    return True

stop()

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
577
578
579
def stop(self):
    """Do nothing"""
    pass

CoVeChallengerRAGWiseAgent

Bases: BaseCoVeChallengerWiseAgent

This agent implementation is used to challenge the response from a RAG agent using the Chain-of-Verification (CoVe) method (https://arxiv.org/pdf/2309.11495) to try to prevent hallucinations.

Source code in wiseagents/agents/rag_wise_agents.py
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
class CoVeChallengerRAGWiseAgent(BaseCoVeChallengerWiseAgent):
    """
    This agent implementation is used to challenge the response from a RAG agent using the
    Chain-of-Verification (CoVe) method (https://arxiv.org/pdf/2309.11495) to try to prevent
    hallucinations.
    """
    yaml_tag = u'!wiseagents.agents.CoVeChallengerRAGWiseAgent'

    def __new__(cls, *args, **kwargs):
        """Create a new instance of the class, setting default values for the optional instance variables."""
        obj = super().__new__(cls)
        obj._collection_name = DEFAULT_COLLECTION_NAME
        obj._k = DEFAULT_NUM_DOCUMENTS
        obj._num_verification_questions = DEFAULT_NUM_VERIFICATION_QUESTIONS
        return obj

    def __init__(self, name: str, metadata: WiseAgentMetaData, llm: WiseAgentLLM, vector_db: WiseAgentVectorDB,
                 transport: WiseAgentTransport, collection_name: Optional[str] = DEFAULT_COLLECTION_NAME,
                 k: Optional[int] = DEFAULT_NUM_DOCUMENTS,
                 num_verification_questions: Optional[int] = DEFAULT_NUM_VERIFICATION_QUESTIONS):
        """
        Initialize the agent.

        Args:
            name (str): the name of the agent
            metadata (WiseAgentMetaData): the metadata for the agent
            llm (WiseAgentLLM): the LLM agent to use for processing requests
            vector_db (WiseAgentVectorDB): the vector database to use for retrieving documents
            transport (WiseAgentTransport): the transport to use for communication
            collection_name (Optional[str]): the name of the collection to use in the vector database, defaults to wise-agents-collection
            k (Optional[int]): the number of documents to retrieve from the vector database, defaults to 4
            num_verification_questions (Optional[int]): the number of verification questions to generate, defaults to 4
        """
        self._k = k
        self._num_verification_questions = num_verification_questions
        super().__init__(name=name, metadata=metadata, transport=transport, llm=llm,
                         vector_db=vector_db, collection_name=collection_name,
                         k=k, num_verification_questions=num_verification_questions)

    def __repr__(self):
        """Return a string representation of the agent."""
        return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm},"
                f"vector_db={self.vector_db}, collection_name={self.collection_name}, k={self.k},"
                f"num_verification_questions={self._num_verification_questions}, transport={self.transport})")

    def process_event(self, event):
        """Do nothing"""
        return True

    def process_error(self, error):
        """Log the error and return True."""
        logging.error(error)
        return True

    def process_request(self, request: WiseAgentMessage, conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
        """
        Process a message containing a question and a baseline response to the question
        by challenging the baseline response to generate a revised response to the original question.

        Args:
            request (WiseAgentMessage): the request message to process
            conversation_history (List[ChatCompletionMessageParam]): The conversation history that
            can be used while processing the request. If this agent isn't involved in a type of
            collaboration that makes use of the conversation history, this will be an empty list.

        Returns:
            str: the response to the request message as a string
        """
        logging.getLogger(self.name).info(f"Received a message from {request.sender}. Starting to challenge it")
        llm_response = self.create_and_process_chain_of_verification_prompts(request.message, conversation_history)
        return llm_response

    def process_response(self, response: WiseAgentMessage):
        """Do nothing"""
        return True

    def stop(self):
        """Do nothing"""
        pass

    def retrieve_documents(self, question: str) -> List[Document]:
        return retrieve_documents_for_rag(question, self.vector_db, self.collection_name, self.k)

__init__(name, metadata, llm, vector_db, transport, collection_name=DEFAULT_COLLECTION_NAME, k=DEFAULT_NUM_DOCUMENTS, num_verification_questions=DEFAULT_NUM_VERIFICATION_QUESTIONS)

Initialize the agent.

Parameters:
  • name (str) –

    the name of the agent

  • metadata (WiseAgentMetaData) –

    the metadata for the agent

  • llm (WiseAgentLLM) –

    the LLM agent to use for processing requests

  • vector_db (WiseAgentVectorDB) –

    the vector database to use for retrieving documents

  • transport (WiseAgentTransport) –

    the transport to use for communication

  • collection_name (Optional[str], default: DEFAULT_COLLECTION_NAME ) –

    the name of the collection to use in the vector database, defaults to wise-agents-collection

  • k (Optional[int], default: DEFAULT_NUM_DOCUMENTS ) –

    the number of documents to retrieve from the vector database, defaults to 4

  • num_verification_questions (Optional[int], default: DEFAULT_NUM_VERIFICATION_QUESTIONS ) –

    the number of verification questions to generate, defaults to 4

Source code in wiseagents/agents/rag_wise_agents.py
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
def __init__(self, name: str, metadata: WiseAgentMetaData, llm: WiseAgentLLM, vector_db: WiseAgentVectorDB,
             transport: WiseAgentTransport, collection_name: Optional[str] = DEFAULT_COLLECTION_NAME,
             k: Optional[int] = DEFAULT_NUM_DOCUMENTS,
             num_verification_questions: Optional[int] = DEFAULT_NUM_VERIFICATION_QUESTIONS):
    """
    Initialize the agent.

    Args:
        name (str): the name of the agent
        metadata (WiseAgentMetaData): the metadata for the agent
        llm (WiseAgentLLM): the LLM agent to use for processing requests
        vector_db (WiseAgentVectorDB): the vector database to use for retrieving documents
        transport (WiseAgentTransport): the transport to use for communication
        collection_name (Optional[str]): the name of the collection to use in the vector database, defaults to wise-agents-collection
        k (Optional[int]): the number of documents to retrieve from the vector database, defaults to 4
        num_verification_questions (Optional[int]): the number of verification questions to generate, defaults to 4
    """
    self._k = k
    self._num_verification_questions = num_verification_questions
    super().__init__(name=name, metadata=metadata, transport=transport, llm=llm,
                     vector_db=vector_db, collection_name=collection_name,
                     k=k, num_verification_questions=num_verification_questions)

__new__(*args, **kwargs)

Create a new instance of the class, setting default values for the optional instance variables.

Source code in wiseagents/agents/rag_wise_agents.py
414
415
416
417
418
419
420
def __new__(cls, *args, **kwargs):
    """Create a new instance of the class, setting default values for the optional instance variables."""
    obj = super().__new__(cls)
    obj._collection_name = DEFAULT_COLLECTION_NAME
    obj._k = DEFAULT_NUM_DOCUMENTS
    obj._num_verification_questions = DEFAULT_NUM_VERIFICATION_QUESTIONS
    return obj

__repr__()

Return a string representation of the agent.

Source code in wiseagents/agents/rag_wise_agents.py
445
446
447
448
449
def __repr__(self):
    """Return a string representation of the agent."""
    return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm},"
            f"vector_db={self.vector_db}, collection_name={self.collection_name}, k={self.k},"
            f"num_verification_questions={self._num_verification_questions}, transport={self.transport})")

process_error(error)

Log the error and return True.

Source code in wiseagents/agents/rag_wise_agents.py
455
456
457
458
def process_error(self, error):
    """Log the error and return True."""
    logging.error(error)
    return True

process_event(event)

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
451
452
453
def process_event(self, event):
    """Do nothing"""
    return True

process_request(request, conversation_history)

Process a message containing a question and a baseline response to the question by challenging the baseline response to generate a revised response to the original question.

Parameters:
  • request (WiseAgentMessage) –

    the request message to process

  • conversation_history (List[ChatCompletionMessageParam]) –

    the conversation history that can be used while processing the request;
    this will be an empty list if this agent isn't involved in a type of
    collaboration that makes use of the conversation history

Returns:
  • str (Optional[str]) –

    the response to the request message as a string

Source code in wiseagents/agents/rag_wise_agents.py
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
def process_request(self, request: WiseAgentMessage, conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
    """
    Process a message containing a question and a baseline response to the question
    by challenging the baseline response to generate a revised response to the original question.

    Args:
        request (WiseAgentMessage): the request message to process
        conversation_history (List[ChatCompletionMessageParam]): The conversation history that
        can be used while processing the request. If this agent isn't involved in a type of
        collaboration that makes use of the conversation history, this will be an empty list.

    Returns:
        str: the response to the request message as a string
    """
    logging.getLogger(self.name).info(f"Received a message from {request.sender}. Starting to challenge it")
    llm_response = self.create_and_process_chain_of_verification_prompts(request.message, conversation_history)
    return llm_response

process_response(response)

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
478
479
480
def process_response(self, response: WiseAgentMessage):
    """Do nothing"""
    return True

stop()

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
482
483
484
def stop(self):
    """Do nothing"""
    pass

GraphRAGWiseAgent

Bases: WiseAgent

This agent implementation makes use of Graph Retrieval Augmented Generation (Graph RAG) to answer questions.

Source code in wiseagents/agents/rag_wise_agents.py
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
class GraphRAGWiseAgent(WiseAgent):
    """
    This agent implementation makes use of Graph Retrieval Augmented Generation (Graph RAG) to answer questions.
    """
    yaml_tag = u'!wiseagents.agents.GraphRAGWiseAgent'

    def __new__(cls, *args, **kwargs):
        """Create a new instance of the class, setting default values for the optional instance variables."""
        obj = super().__new__(cls)
        obj._k = DEFAULT_NUM_DOCUMENTS
        obj._include_sources = DEFAULT_INCLUDE_SOURCES
        obj._retrieval_query = ""
        obj._params = None
        obj._metadata_filter = None
        return obj

    def __init__(self, name: str, metadata: WiseAgentMetaData, llm: WiseAgentLLM, graph_db: WiseAgentGraphDB,
                 transport: WiseAgentTransport, k: Optional[int] = DEFAULT_NUM_DOCUMENTS,
                 include_sources: Optional[bool] = DEFAULT_INCLUDE_SOURCES,
                 retrieval_query: Optional[str] = "", params: Optional[Dict[str, Any]] = None,
                 metadata_filter: Optional[Dict[str, Any]] = None):
        """
        Initialize the agent.

        Args:
            name (str): the name of the agent
            metadata (WiseAgentMetaData): the metadata for the agent
            llm (WiseAgentLLM): the LLM to use for processing requests
            graph_db (WiseAgentGraphDB): the graph database to use for retrieving documents
            transport (WiseAgentTransport): the transport to use for communication
            k (Optional[int]): the number of documents to retrieve for each query, defaults to 4
            include_sources Optional(bool): whether to include the sources of the documents that were consulted to
            produce the response, defaults to False
            retrieval_query (Optional[str]): the optional retrieval query to use to obtain sub-graphs connected to nodes
            retrieved from a similarity search
            params (Optional[Dict[str, Any]]): the optional parameters for the query
            metadata_filter (Optional[Dict[str, Any]]): the optional metadata filter to use with similarity search
        """
        self._k = k
        self._include_sources = include_sources
        self._retrieval_query = retrieval_query
        self._params = params
        self._metadata_filter = metadata_filter
        super().__init__(name=name, metadata=metadata, transport=self.transport, llm=llm,
                         graph_db=graph_db)

    def __repr__(self):
        """Return a string representation of the agent."""
        return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm},"
                f"graph_db={self.graph_db}, transport={self.transport}, k={self.k},"
                f"include_sources={self.include_sources}), retrieval_query={self.retrieval_query},"
                f"params={self.params}, metadata_filter={self.metadata_filter})")

    def process_event(self, event):
        """Do nothing"""
        return True

    def process_error(self, error):
        """Log the error and return True."""
        logging.error(error)
        return True

    def process_request(self, request: WiseAgentMessage, conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
        """
        Process a request message by passing it to the RAG agent and sending the response back to the client.

        Args:
            request (WiseAgentMessage): the request message to process
            conversation_history (List[ChatCompletionMessageParam]): The conversation history that
            can be used while processing the request. If this agent isn't involved in a type of
            collaboration that makes use of the conversation history, this will be an empty list.

        Returns:
            Optional[str]: the response to the request message as a string or None if there is
            no string response yet
        """
        logging.getLogger(self.name).info(f"Received a message from {request.sender}. Starting to process it using Graph RAG")
        retrieved_documents = retrieve_documents_for_graph_rag(request.message, self.graph_db, self.k,
                                                               self.retrieval_query, self.params, self.metadata_filter)
        llm_response_with_sources = create_and_process_rag_prompt(retrieved_documents, request.message, self.llm, self.include_sources,
                                                                   conversation_history, self.metadata.system_message)
        return llm_response_with_sources

    def process_response(self, response: WiseAgentMessage):
        """Do nothing"""
        return True

    def stop(self):
        """Do nothing"""
        pass

    @property
    def name(self) -> str:
        """Get the name of the agent."""
        return self._name

    @property
    def k(self) -> int:
        """Get the number of documents to retrieve for each query."""
        return self._k

    @property
    def include_sources(self) -> bool:
        """Get whether to include the sources of the documents that were consulted to produce the response."""
        return self._include_sources

    @property
    def retrieval_query(self) -> str:
        """Get the Cypher query to use to obtain sub-graphs connected to nodes retrieved from a similarity search."""
        return self._retrieval_query

    @property
    def params(self) -> Optional[Dict[str, Any]]:
        """Get the optional parameters for the query."""
        return self._params

    @property
    def metadata_filter(self) -> Optional[Dict[str, Any]]:
        """Get the optional metadata filter to use with similarity search."""
        return self._metadata_filter

include_sources: bool property

Get whether to include the sources of the documents that were consulted to produce the response.

k: int property

Get the number of documents to retrieve for each query.

metadata_filter: Optional[Dict[str, Any]] property

Get the optional metadata filter to use with similarity search.

name: str property

Get the name of the agent.

params: Optional[Dict[str, Any]] property

Get the optional parameters for the query.

retrieval_query: str property

Get the Cypher query to use to obtain sub-graphs connected to nodes retrieved from a similarity search.

__init__(name, metadata, llm, graph_db, transport, k=DEFAULT_NUM_DOCUMENTS, include_sources=DEFAULT_INCLUDE_SOURCES, retrieval_query='', params=None, metadata_filter=None)

Initialize the agent.

Parameters:
  • name (str) –

    the name of the agent

  • metadata (WiseAgentMetaData) –

    the metadata for the agent

  • llm (WiseAgentLLM) –

    the LLM to use for processing requests

  • graph_db (WiseAgentGraphDB) –

    the graph database to use for retrieving documents

  • transport (WiseAgentTransport) –

    the transport to use for communication

  • k (Optional[int], default: DEFAULT_NUM_DOCUMENTS ) –

    the number of documents to retrieve for each query, defaults to 4

  • include_sources (Optional[bool], default: DEFAULT_INCLUDE_SOURCES ) –

    whether to include the sources of the documents that were consulted to
    produce the response, defaults to False

  • retrieval_query (Optional[str], default: '' ) –

    the optional retrieval query to use to obtain sub-graphs connected to nodes

  • params (Optional[Dict[str, Any]], default: None ) –

    the optional parameters for the query

  • metadata_filter (Optional[Dict[str, Any]], default: None ) –

    the optional metadata filter to use with similarity search

Source code in wiseagents/agents/rag_wise_agents.py
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
def __init__(self, name: str, metadata: WiseAgentMetaData, llm: WiseAgentLLM, graph_db: WiseAgentGraphDB,
             transport: WiseAgentTransport, k: Optional[int] = DEFAULT_NUM_DOCUMENTS,
             include_sources: Optional[bool] = DEFAULT_INCLUDE_SOURCES,
             retrieval_query: Optional[str] = "", params: Optional[Dict[str, Any]] = None,
             metadata_filter: Optional[Dict[str, Any]] = None):
    """
    Initialize the agent.

    Args:
        name (str): the name of the agent
        metadata (WiseAgentMetaData): the metadata for the agent
        llm (WiseAgentLLM): the LLM to use for processing requests
        graph_db (WiseAgentGraphDB): the graph database to use for retrieving documents
        transport (WiseAgentTransport): the transport to use for communication
        k (Optional[int]): the number of documents to retrieve for each query, defaults to 4
        include_sources Optional(bool): whether to include the sources of the documents that were consulted to
        produce the response, defaults to False
        retrieval_query (Optional[str]): the optional retrieval query to use to obtain sub-graphs connected to nodes
        retrieved from a similarity search
        params (Optional[Dict[str, Any]]): the optional parameters for the query
        metadata_filter (Optional[Dict[str, Any]]): the optional metadata filter to use with similarity search
    """
    self._k = k
    self._include_sources = include_sources
    self._retrieval_query = retrieval_query
    self._params = params
    self._metadata_filter = metadata_filter
    super().__init__(name=name, metadata=metadata, transport=self.transport, llm=llm,
                     graph_db=graph_db)

__new__(*args, **kwargs)

Create a new instance of the class, setting default values for the optional instance variables.

Source code in wiseagents/agents/rag_wise_agents.py
130
131
132
133
134
135
136
137
138
def __new__(cls, *args, **kwargs):
    """Create a new instance of the class, setting default values for the optional instance variables."""
    obj = super().__new__(cls)
    obj._k = DEFAULT_NUM_DOCUMENTS
    obj._include_sources = DEFAULT_INCLUDE_SOURCES
    obj._retrieval_query = ""
    obj._params = None
    obj._metadata_filter = None
    return obj

__repr__()

Return a string representation of the agent.

Source code in wiseagents/agents/rag_wise_agents.py
170
171
172
173
174
175
def __repr__(self):
    """Return a string representation of the agent."""
    return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm},"
            f"graph_db={self.graph_db}, transport={self.transport}, k={self.k},"
            f"include_sources={self.include_sources}), retrieval_query={self.retrieval_query},"
            f"params={self.params}, metadata_filter={self.metadata_filter})")

process_error(error)

Log the error and return True.

Source code in wiseagents/agents/rag_wise_agents.py
181
182
183
184
def process_error(self, error):
    """Log the error and return True."""
    logging.error(error)
    return True

process_event(event)

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
177
178
179
def process_event(self, event):
    """Do nothing"""
    return True

process_request(request, conversation_history)

Process a request message by passing it to the RAG agent and sending the response back to the client.

Parameters:
  • request (WiseAgentMessage) –

    the request message to process

  • conversation_history (List[ChatCompletionMessageParam]) –

    the conversation history that can be used while processing the request;
    this will be an empty list if this agent isn't involved in a type of
    collaboration that makes use of the conversation history

Returns:
  • Optional[str] –

    the response to the request message as a string, or None if there is
    no string response yet

Source code in wiseagents/agents/rag_wise_agents.py
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
def process_request(self, request: WiseAgentMessage, conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
    """
    Process a request message by passing it to the RAG agent and sending the response back to the client.

    Args:
        request (WiseAgentMessage): the request message to process
        conversation_history (List[ChatCompletionMessageParam]): The conversation history that
        can be used while processing the request. If this agent isn't involved in a type of
        collaboration that makes use of the conversation history, this will be an empty list.

    Returns:
        Optional[str]: the response to the request message as a string or None if there is
        no string response yet
    """
    logging.getLogger(self.name).info(f"Received a message from {request.sender}. Starting to process it using Graph RAG")
    retrieved_documents = retrieve_documents_for_graph_rag(request.message, self.graph_db, self.k,
                                                           self.retrieval_query, self.params, self.metadata_filter)
    llm_response_with_sources = create_and_process_rag_prompt(retrieved_documents, request.message, self.llm, self.include_sources,
                                                               conversation_history, self.metadata.system_message)
    return llm_response_with_sources

process_response(response)

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
207
208
209
def process_response(self, response: WiseAgentMessage):
    """Do nothing"""
    return True

stop()

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
211
212
213
def stop(self):
    """Do nothing"""
    pass

RAGWiseAgent

Bases: WiseAgent

This agent makes use of retrieval augmented generation (RAG) to answer questions.

Source code in wiseagents/agents/rag_wise_agents.py
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
class RAGWiseAgent(WiseAgent):
    """
    This agent makes use of retrieval augmented generation (RAG) to answer questions.
    """
    yaml_tag = u'!wiseagents.agents.RAGWiseAgent'

    def __new__(cls, *args, **kwargs):
        """Create a new instance of the class, setting default values for the optional instance variables."""
        obj = super().__new__(cls)
        obj._collection_name = DEFAULT_COLLECTION_NAME
        obj._k = DEFAULT_NUM_DOCUMENTS
        obj._include_sources = DEFAULT_INCLUDE_SOURCES
        return obj

    def __init__(self, name: str, metadata: WiseAgentMetaData, llm: WiseAgentLLM, vector_db: WiseAgentVectorDB,
                 transport: WiseAgentTransport, collection_name: Optional[str] = DEFAULT_COLLECTION_NAME,
                 k: Optional[int] = DEFAULT_NUM_DOCUMENTS, include_sources: Optional[bool] = DEFAULT_INCLUDE_SOURCES):
        """
        Initialize the agent.

        Args:
            name (str): the name of the agent
            metadata (WiseAgentMetaData): the metadata for the agent
            llm (WiseAgentLLM): the LLM to use for processing requests
            vector_db (WiseAgentVectorDB): the vector database to use for retrieving documents
            transport (WiseAgentTransport): the transport to use for communication
            collection_name Optional(str): the name of the collection within the vector database to use for
            retrieving documents, defaults to wise-agents-collection
            k Optional(int): the number of documents to retrieve for each query, defaults to 4
            include_sources Optional(bool): whether to include the sources of the documents that were consulted to
            produce the response, defaults to False
        """
        self._k = k
        self._include_sources = include_sources
        super().__init__(name=name, metadata=metadata, transport=transport, llm=llm,
                         vector_db=vector_db, collection_name=collection_name)

    def __repr__(self):
        """Return a string representation of the agent."""
        # NOTE(fix): the previous format string was missing spaces after some commas and
        # ended with an unbalanced double closing parenthesis ("))").
        return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm}, "
                f"vector_db={self.vector_db}, collection_name={self.collection_name}, "
                f"transport={self.transport}, k={self.k}, include_sources={self.include_sources})")

    def process_event(self, event):
        """Do nothing"""
        return True

    def process_error(self, error):
        """Log the error and return True."""
        logging.error(error)
        return True

    def process_request(self, request: WiseAgentMessage, conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
        """
        Process a request message using retrieval augmented generation (RAG).

        Args:
            request (WiseAgentMessage): the request message to process
            conversation_history (List[ChatCompletionMessageParam]): The conversation history that
            can be used while processing the request. If this agent isn't involved in a type of
            collaboration that makes use of the conversation history, this will be an empty list.

        Returns:
            Optional[str]: the response to the request message as a string or None if there is
            no string response yet
        """
        logging.getLogger(self.name).info(f"Received a message from {request.sender}. Starting to process it using RAG")
        retrieved_documents = retrieve_documents_for_rag(request.message, self.vector_db, self.collection_name, self.k)
        llm_response_with_sources = create_and_process_rag_prompt(retrieved_documents, request.message, self.llm,
                                                                  self.include_sources, conversation_history,
                                                                  self.metadata.system_message)
        return llm_response_with_sources

    def process_response(self, response: WiseAgentMessage):
        """Do nothing"""
        return True

    def stop(self):
        """Do nothing"""
        pass

    @property
    def name(self) -> str:
        """Get the name of the agent."""
        return self._name

    @property
    def k(self) -> int:
        """Get the number of documents to retrieve for each query."""
        return self._k

    @property
    def include_sources(self) -> bool:
        """Get whether to include the sources of the documents that were consulted to produce the response."""
        return self._include_sources

include_sources: bool property

Get whether to include the sources of the documents that were consulted to produce the response.

k: int property

Get the number of documents to retrieve for each query.

name: str property

Get the name of the agent.

__init__(name, metadata, llm, vector_db, transport, collection_name=DEFAULT_COLLECTION_NAME, k=DEFAULT_NUM_DOCUMENTS, include_sources=DEFAULT_INCLUDE_SOURCES)

Initialize the agent.

Parameters:
  • name (str) –

    the name of the agent

  • metadata (WiseAgentMetaData) –

    the metadata for the agent

  • llm (WiseAgentLLM) –

    the LLM to use for processing requests

  • vector_db (WiseAgentVectorDB) –

    the vector database to use for retrieving documents

  • transport (WiseAgentTransport) –

    the transport to use for communication

  • collection_name (Optional(str, default: DEFAULT_COLLECTION_NAME ) –

    the name of the collection within the vector database to use for retrieving documents, defaults to wise-agents-collection

  • k (Optional(int, default: DEFAULT_NUM_DOCUMENTS ) –

    the number of documents to retrieve for each query, defaults to 4

  • include_sources (Optional(bool, default: DEFAULT_INCLUDE_SOURCES ) –

    whether to include the sources of the documents that were consulted to produce the response, defaults to False

Source code in wiseagents/agents/rag_wise_agents.py
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
def __init__(self, name: str, metadata: WiseAgentMetaData, llm: WiseAgentLLM, vector_db: WiseAgentVectorDB,
             transport: WiseAgentTransport, collection_name: Optional[str] = DEFAULT_COLLECTION_NAME,
             k: Optional[int] = DEFAULT_NUM_DOCUMENTS, include_sources: Optional[bool] = DEFAULT_INCLUDE_SOURCES):
    """
    Initialize the agent.

    Args:
        name (str): the name of the agent
        metadata (WiseAgentMetaData): the metadata for the agent
        llm (WiseAgentLLM): the LLM to use for processing requests
        vector_db (WiseAgentVectorDB): the vector database to use for retrieving documents
        transport (WiseAgentTransport): the transport to use for communication
        collection_name Optional(str): the name of the collection within the vector database to use for
        retrieving documents, defaults to wise-agents-collection
        k Optional(int): the number of documents to retrieve for each query, defaults to 4
        include_sources Optional(bool): whether to include the sources of the documents that were consulted to
        produce the response, defaults to False
    """
    self._k = k
    self._include_sources = include_sources
    super().__init__(name=name, metadata=metadata, transport=transport, llm=llm,
                     vector_db=vector_db, collection_name=collection_name)

__new__(*args, **kwargs)

Create a new instance of the class, setting default values for the optional instance variables.

Source code in wiseagents/agents/rag_wise_agents.py
33
34
35
36
37
38
39
def __new__(cls, *args, **kwargs):
    """Allocate the instance and seed the optional instance variables with their defaults."""
    instance = super().__new__(cls)
    instance._include_sources = DEFAULT_INCLUDE_SOURCES
    instance._k = DEFAULT_NUM_DOCUMENTS
    instance._collection_name = DEFAULT_COLLECTION_NAME
    return instance

__repr__()

Return a string representation of the agent.

Source code in wiseagents/agents/rag_wise_agents.py
64
65
66
67
68
def __repr__(self):
    """Return a string representation of the agent."""
    return (f"{self.__class__.__name__}(name={self.name}, metadata={self.metadata}, llm={self.llm},"
            f"vector_db={self.vector_db}, collection_name={self.collection_name}, transport={self.transport},"
            f"k={self.k}, include_sources={self.include_sources}))")

process_error(error)

Log the error and return True.

Source code in wiseagents/agents/rag_wise_agents.py
74
75
76
77
def process_error(self, error):
    """Record the error at ERROR level and report it as handled."""
    logging.error(error)
    return True

process_event(event)

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
70
71
72
def process_event(self, event):
    """Intentionally a no-op: events require no handling by this agent."""
    return True

process_request(request, conversation_history)

Process a request message using retrieval augmented generation (RAG).

Parameters:
  • request (WiseAgentMessage) –

    the request message to process

  • conversation_history (List[ChatCompletionMessageParam]) –

    The conversation history that can be used while processing the request; an empty list if this agent is not involved in a type of collaboration that makes use of it

Returns:
  • Optional[str] –

    the response to the request message as a string, or None if there is no string response yet

Source code in wiseagents/agents/rag_wise_agents.py
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
def process_request(self, request: WiseAgentMessage, conversation_history: List[ChatCompletionMessageParam]) -> Optional[str]:
    """
    Process a request message using retrieval augmented generation (RAG).

    Args:
        request (WiseAgentMessage): the request message to process
        conversation_history (List[ChatCompletionMessageParam]): the conversation history available
            while processing the request; an empty list if this agent is not involved in a type of
            collaboration that makes use of the conversation history

    Returns:
        Optional[str]: the response to the request message as a string, or None if there is
        no string response yet
    """
    logging.getLogger(self.name).info(f"Received a message from {request.sender}. Starting to process it using RAG")
    documents = retrieve_documents_for_rag(request.message, self.vector_db, self.collection_name, self.k)
    return create_and_process_rag_prompt(documents, request.message, self.llm, self.include_sources,
                                         conversation_history, self.metadata.system_message)

process_response(response)

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
100
101
102
def process_response(self, response: WiseAgentMessage):
    """No-op: this agent does not act on responses."""
    return True

stop()

Do nothing

Source code in wiseagents/agents/rag_wise_agents.py
104
105
106
def stop(self):
    """No-op: nothing needs to be torn down when this agent stops."""
    pass

create_and_process_rag_prompt(retrieved_documents, question, llm, include_sources, conversation_history, system_message)

Create a RAG prompt and process it with the LLM agent.

Parameters:
  • retrieved_documents (List[Document]) –

    the list of retrieved documents

  • question (str) –

    the question to ask

  • llm (WiseAgentLLM) –

    the LLM agent to use for processing the prompt

  • conversation_history (List[ChatCompletionMessageParam]) –

    The conversation history that can be used while processing the request. If this agent isn't involved in a type of collaboration that makes use of the conversation history, this will be an empty list.

  • system_message (str) –

    the optional system message to use

Source code in wiseagents/agents/rag_wise_agents.py
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
def create_and_process_rag_prompt(retrieved_documents: List[Document], question: str, llm: WiseAgentLLM,
                                  include_sources: bool, conversation_history: List[ChatCompletionMessageParam],
                                  system_message: str) -> str:
    """
    Create a RAG prompt and process it with the LLM agent.

    Args:
        retrieved_documents (List[Document]): the list of retrieved documents
        question (str): the question to ask
        llm (WiseAgentLLM): the LLM agent to use for processing the prompt
        include_sources (bool): whether to append the metadata of the consulted documents to the answer
        conversation_history (List[ChatCompletionMessageParam]): the conversation history available while
            processing the request; empty if this agent is not involved in a collaboration that uses it.
            NOTE: this list is mutated in place — the system message (if any) and the RAG prompt are
            appended to it before the chat completion is requested
        system_message (str): the optional system message to use; falls back to the LLM's own
            system message when not provided

    Returns:
        str: the LLM's answer, optionally followed by the metadata of the source documents
    """
    context = "\n".join(document.content for document in retrieved_documents)
    prompt = (f"Answer the question based only on the following context:\n{context}\n"
              f"Question: {question}\n")
    effective_system_message = system_message or llm.system_message
    if effective_system_message:
        conversation_history.append({"role": "system", "content": effective_system_message})
    conversation_history.append({"role": "user", "content": prompt})
    llm_response = llm.process_chat_completion(conversation_history, [])
    answer = llm_response.choices[0].message.content

    if not include_sources:
        return answer
    source_documents = "".join(f"{json.dumps(document.metadata)}\n\n" for document in retrieved_documents)
    return f"{answer}\n\nSource Metadata:\n{source_documents}"

retrieve_documents_for_graph_rag(question, graph_db, k, retrieval_query='', params=None, metadata_filter=None)

Retrieve documents to be used as the context for graph based retrieval augmented generation (Graph RAG).

Parameters:
  • question (str) –

    the question to be used to retrieve the documents

  • graph_db (WiseAgentGraphDB) –

    the graph database to use for retrieving documents

  • k (int) –

    the number of documents to retrieve for a question

  • retrieval_query (Optional[str], default: '' ) –

    the optional retrieval query to use to obtain sub-graphs connected to nodes retrieved from a similarity search

  • params (Optional[Dict[str, Any]], default: None ) –

    the optional parameters for the query

  • metadata_filter (Optional[Dict[str, Any]], default: None ) –

    the optional metadata filter to use with similarity search

Returns:
  • List[Document] –

    the documents retrieved from the graph database

Source code in wiseagents/agents/rag_wise_agents.py
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
def retrieve_documents_for_graph_rag(question: str, graph_db: WiseAgentGraphDB, k: int,
                                     retrieval_query: Optional[str] = "", params: Optional[Dict[str, Any]] = None,
                                     metadata_filter: Optional[Dict[str, Any]] = None) -> List[Document]:
    """
    Retrieve documents to be used as the context for graph based retrieval augmented generation (Graph RAG).

    Args:
        question (str): the question to be used to retrieve the documents
        graph_db (WiseAgentGraphDB): the graph database to use for retrieving documents
        k (int): the number of documents to retrieve for a question
        retrieval_query (Optional[str]): the optional retrieval query to use to obtain sub-graphs
            connected to nodes retrieved from a similarity search
        params (Optional[Dict[str, Any]]): the optional parameters for the query
        metadata_filter (Optional[Dict[str, Any]]): the optional metadata filter to use with similarity search

    Returns:
        List[Document]: the documents retrieved from the graph database
    """
    return graph_db.query_with_embeddings(query=question, k=k,
                                          retrieval_query=retrieval_query,
                                          params=params,
                                          metadata_filter=metadata_filter)

retrieve_documents_for_rag(question, vector_db, collection_name, k)

Retrieve documents to be used as the context for retrieval augmented generation (RAG).

Parameters:
  • question (str) –

    the question to be used to retrieve the documents

  • vector_db (WiseAgentVectorDB) –

    the vector database to use for retrieving documents

  • collection_name (str) –

    the name of the collection within the vector database to use for retrieving documents

  • k (int) –

    the number of documents to retrieve for a question

Returns:
  • List[Document] –

    the documents retrieved for the question, or an empty list if none were found

Source code in wiseagents/agents/rag_wise_agents.py
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
def retrieve_documents_for_rag(question: str, vector_db: WiseAgentVectorDB, collection_name: str, k: int) \
        -> List[Document]:
    """
    Retrieve documents to be used as the context for retrieval augmented generation (RAG).

    Args:
        question (str): the question to be used to retrieve the documents
        vector_db (WiseAgentVectorDB): the vector database to use for retrieving documents
        collection_name (str): the name of the collection within the vector database to use for
            retrieving documents
        k (int): the number of documents to retrieve for a question

    Returns:
        List[Document]: the documents retrieved for the question, or an empty list if none were found
    """
    query_results = vector_db.query([question], collection_name, k)
    # The vector DB returns one result list per query; only a single query was issued.
    return query_results[0] if query_results else []