autogen
autogen copied to clipboard
While using AgentOps to track AutoGen agentic performance and usage, the following error occurs: `AutogenLogger.log_chat_completion() missing 1 required positional argument: 'start_time'`
What happened?
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Cell In[36], line 40
37 log_agent_activity(text)
39 start_t=perf_counter()
---> 40 result=user_proxy.initiate_chat(manager,
41 message=user_question,
42 )
43 agentops.end_session("Success")
44 end_t=perf_counter()
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\autogen\agentchat\conversable_agent.py:1018, in ConversableAgent.initiate_chat(self, recipient, clear_history, silent, cache, max_turns, summary_method, summary_args, message, **kwargs)
1016 else:
1017 msg2send = self.generate_init_message(message, **kwargs)
-> 1018 self.send(msg2send, recipient, silent=silent)
1019 summary = self._summarize_chat(
1020 summary_method,
1021 summary_args,
1022 recipient,
1023 cache=cache,
1024 )
1025 for agent in [self, recipient]:
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\autogen\agentchat\conversable_agent.py:655, in ConversableAgent.send(self, message, recipient, request_reply, silent)
653 valid = self._append_oai_message(message, "assistant", recipient)
654 if valid:
--> 655 recipient.receive(message, self, request_reply, silent)
656 else:
657 raise ValueError(
658 "Message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
659 )
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\autogen\agentchat\conversable_agent.py:818, in ConversableAgent.receive(self, message, sender, request_reply, silent)
816 if request_reply is False or request_reply is None and self.reply_at_receive[sender] is False:
817 return
--> 818 reply = self.generate_reply(messages=self.chat_messages[sender], sender=sender)
819 if reply is not None:
820 self.send(reply, sender, silent=silent)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\autogen\agentchat\conversable_agent.py:1974, in ConversableAgent.generate_reply(self, messages, sender, **kwargs)
1972 continue
1973 if self._match_trigger(reply_func_tuple["trigger"], sender):
-> 1974 final, reply = reply_func(self, messages=messages, sender=sender, config=reply_func_tuple["config"])
1975 if logging_enabled():
1976 log_event(
1977 self,
1978 "reply_func_executed",
(...)
1982 reply=reply,
1983 )
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\autogen\agentchat\groupchat.py:1058, in GroupChatManager.run_chat(self, messages, sender, config)
1056 iostream.print(colored(f"\nNext speaker: {speaker.name}\n", "green"), flush=True)
1057 # let the speaker speak
-> 1058 reply = speaker.generate_reply(sender=self)
1059 except KeyboardInterrupt:
1060 # let the admin agent speak if interrupted
1061 if groupchat.admin_name in groupchat.agent_names:
1062 # admin agent is one of the participants
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\autogen\agentchat\conversable_agent.py:1974, in ConversableAgent.generate_reply(self, messages, sender, **kwargs)
1972 continue
1973 if self._match_trigger(reply_func_tuple["trigger"], sender):
-> 1974 final, reply = reply_func(self, messages=messages, sender=sender, config=reply_func_tuple["config"])
1975 if logging_enabled():
1976 log_event(
1977 self,
1978 "reply_func_executed",
(...)
1982 reply=reply,
1983 )
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\autogen\agentchat\conversable_agent.py:1340, in ConversableAgent.generate_oai_reply(self, messages, sender, config)
1338 if messages is None:
1339 messages = self._oai_messages[sender]
-> 1340 extracted_response = self._generate_oai_reply_from_client(
1341 client, self._oai_system_message + messages, self.client_cache
1342 )
1343 return (False, None) if extracted_response is None else (True, extracted_response)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\autogen\agentchat\conversable_agent.py:1359, in ConversableAgent._generate_oai_reply_from_client(self, llm_client, messages, cache)
1356 all_messages.append(message)
1358 # TODO: #1143 handle token limit exceeded error
-> 1359 response = llm_client.create(
1360 context=messages[-1].pop("context", None),
1361 messages=all_messages,
1362 cache=cache,
1363 )
1364 extracted_response = llm_client.extract_text_or_completion_object(response)[0]
1366 if extracted_response is None:
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\autogen\oai\client.py:679, in OpenAIWrapper.create(self, **config)
675 cache.set(key, response)
677 if logging_enabled():
678 # TODO: log the config_id and pass_filter etc.
--> 679 log_chat_completion(
680 invocation_id=invocation_id,
681 client_id=id(client),
682 wrapper_id=id(self),
683 request=params,
684 response=response,
685 is_cached=0,
686 cost=response.cost,
687 start_time=request_ts,
688 )
690 response.message_retrieval_function = client.message_retrieval
691 # check the filter
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\autogen\runtime_logging.py:69, in log_chat_completion(invocation_id, client_id, wrapper_id, request, response, is_cached, cost, start_time)
66 logger.error("[runtime logging] log_chat_completion: autogen logger is None")
67 return
---> 69 autogen_logger.log_chat_completion(
70 invocation_id, client_id, wrapper_id, request, response, is_cached, cost, start_time
71 )
**TypeError: AutogenLogger.log_chat_completion() missing 1 required positional argument: 'start_time'**
Details: Python=3.10.7 langchain_community==0.2.4 langchain_openai==0.1.8 langchain==0.2.3 pyautogen==0.2.28 agentops==0.3.18
What did you expect to happen?
Need help to resolve this error/bug.
How can we reproduce it (as minimally and precisely as possible)?
Import `pyautogen` and `agentops`, start an AgentOps session, then initialize and run a basic agent.
AutoGen version
0.3.18
Which package was this bug in
Other
Model used
gpt-4
Python version
3.10.7
Operating system
windows
Any additional info you think would be helpful for fixing this bug
No response
@rysweet is there a proper label for v0.3* issues I should use?
@sanket-affine please raise this issue with the AgentOps team — thanks.
Closed, as this issue belongs to the AgentOps project rather than AutoGen.