autogen icon indicating copy to clipboard operation
autogen copied to clipboard

When using SemanticKernel adapter with AWS Bedrock Claude, got Tool Call Error: The tool 'autogen-tools_get_weather' is not available.

Open ekzhu opened this issue 1 year ago • 12 comments

Discussed in https://github.com/microsoft/autogen/discussions/5420

Originally posted by GxWwT February 7, 2025

import boto3
import asyncio
from botocore.config import Config

from autogen_core.models import ModelInfo, ModelFamily
from autogen_ext.models.semantic_kernel import SKChatCompletionAdapter

from semantic_kernel import Kernel
from semantic_kernel.connectors.ai.bedrock import BedrockChatCompletion, BedrockChatPromptExecutionSettings
from semantic_kernel.memory.null_memory import NullMemory

# AWS client configuration: us-east-1 region, SigV4 signing, standard retry mode.
_boto_config = Config(
    region_name='us-east-1',
    signature_version='v4',
    retries={'max_attempts': 10, 'mode': 'standard'},
)

# Custom boto3 clients: one for the Bedrock runtime, one for the control plane.
_runtime_client = boto3.client(service_name='bedrock-runtime', config=_boto_config)
_control_client = boto3.client("bedrock", config=_boto_config)

# Semantic Kernel chat-completion service backed by Claude 3.5 Sonnet on Bedrock.
_sk_chat = BedrockChatCompletion(
    model_id='anthropic.claude-3-5-sonnet-20240620-v1:0',
    runtime_client=_runtime_client,
    client=_control_client,
)

# Sampling settings forwarded to Bedrock on each request.
_settings = BedrockChatPromptExecutionSettings(
    temperature=0.7,
    max_tokens=1000,
)

# Advertise function-calling support so AutoGen passes tools through to the model.
_info = ModelInfo(vision=False, function_calling=True, json_output=True, family=ModelFamily.UNKNOWN)

# AutoGen-compatible model client wrapping the Semantic Kernel service.
model_client = SKChatCompletionAdapter(
    _sk_chat,
    kernel=Kernel(memory=NullMemory()),
    prompt_settings=_settings,
    model_info=_info,
)

from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import TextMessage
from autogen_core import CancellationToken

async def get_weather(city: str) -> str:
    """Return a canned weather report for *city* (stub tool for the demo)."""
    report = f"The weather in {city} is 73 degrees and Sunny."
    return report

async def main() -> None:
    """Run one streamed exchange with a weather-tool-equipped assistant."""
    agent = AssistantAgent(
        name="assistant",
        model_client=model_client,
        tools=[get_weather],
        system_message="You are a helpful AI assistant that can provide weather information.",
        model_client_stream=True,
    )

    request = TextMessage(content="Weather in Shanghai", source="user")
    # Print each streamed event (text chunks, tool calls, results) as it arrives.
    async for event in agent.on_messages_stream([request], CancellationToken()):
        print(event)

# Guard the entry point so importing this module does not trigger a live
# Bedrock call; the run only happens when executed as a script.
if __name__ == "__main__":
    asyncio.run(main())

Error

source='assistant' models_usage=None content="Certainly! I'd be happy to" type='ModelClientStreamingChunkEvent'
source='assistant' models_usage=None content=' provide you with the current weather' type='ModelClientStreamingChunkEvent'
source='assistant' models_usage=None content=' information for Shanghai. To get the most' type='ModelClientStreamingChunkEvent'
source='assistant' models_usage=None content=' up-to-date an' type='ModelClientStreamingChunkEvent'
source='assistant' models_usage=None content="d accurate weather data, I'll nee" type='ModelClientStreamingChunkEvent'
source='assistant' models_usage=None content='d to use the weather' type='ModelClientStreamingChunkEvent'
source='assistant' models_usage=None content=' tool. Let me fetch' type='ModelClientStreamingChunkEvent'
source='assistant' models_usage=None content=' that information for you right' type='ModelClientStreamingChunkEvent'
source='assistant' models_usage=None content=' away.' type='ModelClientStreamingChunkEvent'
source='assistant' models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0) content=[FunctionCall(id='tooluse_VN13BhhhTLuwwFxmtQUdqA', arguments='{}', name='autogen-tools_get_weather')] type='ToolCallRequestEvent'
source='assistant' models_usage=None content=[FunctionExecutionResult(content="Error: The tool 'autogen-tools_get_weather' is not available.", call_id='tooluse_VN13BhhhTLuwwFxmtQUdqA')] type='ToolCallExecutionEvent'
Response(chat_message=ToolCallSummaryMessage(source='assistant', models_usage=None, content="**Error: The tool 'autogen-tools_get_weather' is not available.**", type='ToolCallSummaryMessage'), inner_messages=[ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), content=[FunctionCall(id='tooluse_VN13BhhhTLuwwFxmtQUdqA', arguments='{}', name='autogen-tools_get_weather')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content="Error: The tool 'autogen-tools_get_weather' is not available.", call_id='tooluse_VN13BhhhTLuwwFxmtQUdqA')], type='ToolCallExecutionEvent')])

ekzhu avatar Feb 07 '25 20:02 ekzhu

Related #5413. cc @lspinheiro

ekzhu avatar Feb 07 '25 20:02 ekzhu

Resolved now; the fix will be released in v0.4.6.

ekzhu avatar Feb 10 '25 19:02 ekzhu

@ekzhu @lspinheiro Has this been released? I have the following installed: opentelemetry-semantic-conventions 0.51b0, semantic-kernel 1.17.1, semantic-version 2.10.0, autogen-agentchat 0.4.7, autogen-core 0.4.7, autogen-ext 0.4.7. Still getting this error: ---------- weather_agent ---------- Error: The tool 'get-weather' is not available.

asheeshgarg avatar Feb 27 '25 21:02 asheeshgarg

I'm seeing this too.

Both via the example provided, and the code I'm building today.

Prompt:

Hello! Could you create a new feature using user ID 12345 and the tool create_a_new_feature_for_the_user?

Log:

INFO:__main__:Assistant response: [ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=513, completion_tokens=148), content=[FunctionCall(id='tooluse_tURt1VlSTcqLQr88WbpZKg', arguments='{"user_id": 12345}', name='create-a_new_feature_for_the_user')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, content=[FunctionExecutionResult(content="Error: The tool 'create-a_new_feature_for_the_user' is not available.", call_id='tooluse_tURt1VlSTcqLQr88WbpZKg', is_error=True)], type='ToolCallExecutionEvent')]

Interesting that it's calling create-a_new_feature_for_the_user when the function is called create_a_new_feature_for_the_user

natemellendorf avatar Mar 02 '25 21:03 natemellendorf

Ok, yes. Very much a bug.

In the example, if I change get_weather to getWeather , it works.

Will wait for an update to see when this is fixed 👀

natemellendorf avatar Mar 02 '25 22:03 natemellendorf

@ekzhu this is an important feature for Bedrock integration; I hope it will be released soon.

asheeshgarg avatar Mar 02 '25 23:03 asheeshgarg

@asheeshgarg @natemellendorf are you interested in helping debugging this in the main branch?

Some changes have been made already so it would be helpful to try it again from the main branch.

ekzhu avatar Mar 02 '25 23:03 ekzhu

@ekzhu I tried with dist build from main autogen-agentchat 0.4.8 autogen-core 0.4.8 autogen-ext 0.4.8

Still facing same issue---------- assistant ---------- [FunctionExecutionResult(content="Error: The tool 'get-weather' is not available.", call_id='tooluse_0tCWJKLETZia2DaOB192HQ', is_error=True)] ---------- assistant ---------- Error: The tool 'get-weather' is not available. getWeather works as pointed out by @natemellendorf

asheeshgarg avatar Mar 03 '25 01:03 asheeshgarg

@asheeshgarg @natemellendorf , can you post the full error trace you currently have? Currently I'm unable to reproduce due to an issue with cross-inference endpoints in SK: https://github.com/microsoft/semantic-kernel/issues/10738

lspinheiro avatar Mar 05 '25 03:03 lspinheiro

@ekzhu @lspinheiro Still the same error. I used autogen-agentchat 0.4.8.1 autogen-core 0.4.8.1 autogen-ext 0.4.8.1

from autogen_core.models import ModelInfo, ModelFamily

model_info = ModelInfo(vision=False, function_calling=True, json_output=True, family=ModelFamily.CLAUDE_3_5_SONNET) model_client = SKChatCompletionAdapter( sk_client, kernel=Kernel(memory=NullMemory()), prompt_settings=settings, model_info=model_info, )

from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.messages import TextMessage from autogen_core import CancellationToken from autogen_agentchat.ui import Console from autogen_agentchat.teams import RoundRobinGroupChat

async def get_weather(city: str) -> str: """Get the current weather for a given city""" return f"The weather in {city} is 73 degrees and Sunny."

weather_agent = AssistantAgent( name="assistant", model_client=model_client, tools=[get_weather], system_message="You are a helpful AI assistant that can provide weather information.", model_client_stream=True, )

print("Registered tools:", [tool.name for tool in weather_agent._tools])

agent_team = RoundRobinGroupChat([weather_agent], max_turns=1)

await Console(agent_team.run_stream(task="weather in NY"))

INFO:autogen_agentchat.events:source='assistant' models_usage=None metadata={} content=' away.' type='ModelClientStreamingChunkEvent' INFO:autogen_core:Calling message handler for collect_output_messages with message type GroupChatMessage published by assistant/646fa0e5-e9c2-4b13-8a30-e39dd51f70c9 INFO:autogen_core.events:{"payload": "{"message":{"source":"assistant","models_usage":{"prompt_tokens":0,"completion_tokens":0},"metadata":{},"content":[{"id":"tooluse_F76iDVlTQ_647m-k4ILukw","arguments":"{\"city\": \"New York\"}","name":"get-weather"}],"type":"ToolCallRequestEvent"}}", "sender": "assistant/646fa0e5-e9c2-4b13-8a30-e39dd51f70c9", "receiver": null, "kind": "MessageKind.PUBLISH", "delivery_stage": "DeliveryStage.DELIVER", "type": "Message"} INFO:autogen_agentchat.events:source='assistant' models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0) metadata={} content=[FunctionCall(id='tooluse_F76iDVlTQ_647m-k4ILukw', arguments='{"city": "New York"}', name='get-weather')] type='ToolCallRequestEvent' INFO:autogen_core:Calling message handler for collect_output_messages with message type GroupChatMessage published by assistant/646fa0e5-e9c2-4b13-8a30-e39dd51f70c9 INFO:autogen_core.events:{"payload": "{"message":{"source":"assistant","models_usage":null,"metadata":{},"content":[{"content":"Error: The tool 'get-weather' is not available.","name":"get-weather","call_id":"tooluse_F76iDVlTQ_647m-k4ILukw","is_error":true}],"type":"ToolCallExecutionEvent"}}", "sender": "assistant/646fa0e5-e9c2-4b13-8a30-e39dd51f70c9", "receiver": null, "kind": "MessageKind.PUBLISH", "delivery_stage": "DeliveryStage.DELIVER", "type": "Message"} INFO:autogen_agentchat.events:source='assistant' models_usage=None metadata={} content=[FunctionExecutionResult(content="Error: The tool 'get-weather' is not available.", name='get-weather', call_id='tooluse_F76iDVlTQ_647m-k4ILukw', is_error=True)] type='ToolCallExecutionEvent' INFO:autogen_core:Calling message handler for 
collect_output_messages with message type GroupChatMessage published by assistant/646fa0e5-e9c2-4b13-8a30-e39dd51f70c9 INFO:autogen_core.events:{"payload": "{"message":{"source":"assistant","models_usage":null,"metadata":{},"content":"Error: The tool 'get-weather' is not available.","type":"ToolCallSummaryMessage"}}", "sender": "assistant/646fa0e5-e9c2-4b13-8a30-e39dd51f70c9", "receiver": null, "kind": "MessageKind.PUBLISH", "delivery_stage": "DeliveryStage.DELIVER", "type": "Message"} INFO:autogen_agentchat.events:source='assistant' models_usage=None metadata={} content="Error: The tool 'get-weather' is not available." type='ToolCallSummaryMessage' INFO:autogen_core:Calling message handler for group_chat_manager with message type GroupChatAgentResponse published by assistant/646fa0e5-e9c2-4b13-8a30-e39dd51f70c9 INFO:autogen_core.events:{"payload": "{"agent_response":{"chat_message":{"source":"assistant","models_usage":null,"metadata":{},"content":"Error: The tool 'get-weather' is not available.","type":"ToolCallSummaryMessage"},"inner_messages":[{"source":"assistant","models_usage":{"prompt_tokens":0,"completion_tokens":0},"metadata":{},"content":[{"id":"tooluse_F76iDVlTQ_647m-k4ILukw","arguments":"{\"city\": \"New York\"}","name":"get-weather"}],"type":"ToolCallRequestEvent"},{"source":"assistant","models_usage":null,"metadata":{},"content":[{"content":"Error: The tool 'get-weather' is not available.","name":"get-weather","call_id":"tooluse_F76iDVlTQ_647m-k4ILukw","is_error":true}],"type":"ToolCallExecutionEvent"}]}}", "sender": "assistant/646fa0e5-e9c2-4b13-8a30-e39dd51f70c9", "receiver": null, "kind": "MessageKind.PUBLISH", "delivery_stage": "DeliveryStage.DELIVER", "type": "Message"} INFO:autogen_core:Publishing message of type GroupChatTermination to all subscribers: {'message': StopMessage(source='Group chat manager', models_usage=None, metadata={}, content='Maximum number of turns 1 reached.', type='StopMessage')} INFO:autogen_core.events:{"payload": 
"{"message":{"source":"Group chat manager","models_usage":null,"metadata":{},"content":"Maximum number of turns 1 reached.","type":"StopMessage"}}", "sender": "group_chat_manager/646fa0e5-e9c2-4b13-8a30-e39dd51f70c9", "receiver": "output_topic/646fa0e5-e9c2-4b13-8a30-e39dd51f70c9", "kind": "MessageKind.PUBLISH", "delivery_stage": "DeliveryStage.SEND", "type": "Message"} INFO:autogen_core:Calling message handler for collect_output_messages with message type GroupChatTermination published by group_chat_manager/646fa0e5-e9c2-4b13-8a30-e39dd51f70c9 INFO:autogen_core.events:{"payload": "{"message":{"source":"Group chat manager","models_usage":null,"metadata":{},"content":"Maximum number of turns 1 reached.","type":"StopMessage"}}", "sender": "group_chat_manager/646fa0e5-e9c2-4b13-8a30-e39dd51f70c9", "receiver": null, "kind": "MessageKind.PUBLISH", "delivery_stage": "DeliveryStage.DELIVER", "type": "Message"} INFO:autogen_agentchat.events:source='Group chat manager' models_usage=None metadata={} content='Maximum number of turns 1 reached.' type='StopMessage' Certainly! I'd be happy to help you get the current weather information for New York. To provide you with accurate information, I'll need to use the weather tool. Let me fetch that for you right away. ---------- assistant ---------- [FunctionExecutionResult(content="Error: The tool 'get-weather' is not available.", name='get-weather', call_id='tooluse_F76iDVlTQ_647m-k4ILukw', is_error=True)] ---------- assistant ---------- Error: The tool 'get-weather' is not available. 
TaskResult(messages=[TextMessage(source='user', models_usage=None, metadata={}, content='weather in NY', type='TextMessage'), ToolCallRequestEvent(source='assistant', models_usage=RequestUsage(prompt_tokens=0, completion_tokens=0), metadata={}, content=[FunctionCall(id='tooluse_F76iDVlTQ_647m-k4ILukw', arguments='{"city": "New York"}', name='get-weather')], type='ToolCallRequestEvent'), ToolCallExecutionEvent(source='assistant', models_usage=None, metadata={}, content=[FunctionExecutionResult(content="Error: The tool 'get-weather' is not available.", name='get-weather', call_id='tooluse_F76iDVlTQ_647m-k4ILukw', is_error=True)], type='ToolCallExecutionEvent'), ToolCallSummaryMessage(source='assistant', models_usage=None, metadata={}, content="Error: The tool 'get-weather' is not available.", type='ToolCallSummaryMessage')], stop_reason='Maximum number of turns 1 reached.')

asheeshgarg avatar Mar 07 '25 20:03 asheeshgarg

Also, is there a way to load Bedrock models from a config, just like the ChatCompletionClient load method?

asheeshgarg avatar Mar 07 '25 20:03 asheeshgarg

I'm having the same problem.

❯ pip list | grep autogen
autogen-agentchat                  0.4.9.2
autogen-core                       0.4.9.2
autogen-ext                        0.4.7

my code:


import asyncio
from semantic_kernel.connectors.ai.bedrock import BedrockChatCompletion, BedrockPromptExecutionSettings
from autogen_ext.models.semantic_kernel import SKChatCompletionAdapter
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.ui import Console
from semantic_kernel.memory.null_memory import NullMemory
from semantic_kernel import Kernel
from autogen_core.models import UserMessage,ModelFamily,ModelInfo

class CustomSKChatCompletionAdapter(SKChatCompletionAdapter):
    """SKChatCompletionAdapter subclass whose close() is a no-op.

    NOTE(review): presumably a workaround for the base adapter's close()
    misbehaving (raising or being unimplemented) with the Bedrock
    connector — confirm against the installed autogen-ext version.
    """

    def close(self):
        # Intentionally skip the base class's shutdown logic.
        pass

async def get_weather(city: str) -> str:
    """Fixed-response weather lookup used as the agent's demo tool."""
    answer = f"The weather in {city} is 73 degrees and Sunny."
    return answer
 
async def main() -> None:
    """Build a Bedrock-backed assistant and stream one weather query to the console."""
    # Semantic Kernel chat service for Claude 3.5 Sonnet on Bedrock
    # (region/credentials come from the default AWS environment).
    chat_service = BedrockChatCompletion(
        model_id="anthropic.claude-3-5-sonnet-20240620-v1:0",
    )

    execution_settings = BedrockPromptExecutionSettings(temperature=0.2)

    # Advertise function-calling support so AutoGen forwards tools to the model.
    info = ModelInfo(
        vision=False,
        function_calling=True,
        json_output=True,
        family=ModelFamily.CLAUDE_3_5_SONNET,
    )
    client = CustomSKChatCompletionAdapter(
        chat_service,
        kernel=Kernel(memory=NullMemory()),
        prompt_settings=execution_settings,
        model_info=info,
    )

    assistant = AssistantAgent(
        name="weather_agent",
        model_client=client,
        tools=[get_weather],
        system_message="You are a helpful assistant.",
        reflect_on_tool_use=False,
        model_client_stream=False,
    )

    # Stream the single-task run straight to the terminal.
    await Console(assistant.run_stream(task="What is the weather in London?"))

# Guard the entry point so importing this module does not trigger a live
# Bedrock call; the run only happens when executed as a script.
if __name__ == "__main__":
    asyncio.run(main())

output:
---------- user ----------
What is the weather in London?
---------- weather_agent ----------
[FunctionCall(id='tooluse_gOdHo3RpS5241wPeK37pHA', arguments='{"city": "London"}', name='get-weather')]
---------- weather_agent ----------
[FunctionExecutionResult(content="Error: The tool 'get-weather' is not available.", name='get-weather', call_id='tooluse_gOdHo3RpS5241wPeK37pHA', is_error=True)]
---------- weather_agent ----------
Error: The tool 'get-weather' is not available.

if I change get_weather to getWeather, it also works

Malayke avatar Mar 26 '25 07:03 Malayke