Provider List: https://docs.litellm.ai/docs/providers
Traceback (most recent call last):
File "/home/lizhaorui/anaconda3/envs/agent/lib/python3.12/site-packages/litellm/main.py", line 2673, in completion
generator = ollama.get_ollama_response(
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/lizhaorui/anaconda3/envs/agent/lib/python3.12/site-packages/litellm/llms/ollama/completion/handler.py", line 128, in get_ollama_response
raise OllamaError(
litellm.llms.ollama.common_utils.OllamaError: {"error":"model 'qwen2.5:72b ' not found"}
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/lizhaorui/anaconda3/envs/agent/bin/auto", line 8, in
sys.exit(cli())
^^^^^
File "/home/lizhaorui/anaconda3/envs/agent/lib/python3.12/site-packages/click/core.py", line 1157, in call
return self.main(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/lizhaorui/anaconda3/envs/agent/lib/python3.12/site-packages/click/core.py", line 1078, in main
rv = self.invoke(ctx)
^^^^^^^^^^^^^^^^
File "/home/lizhaorui/anaconda3/envs/agent/lib/python3.12/site-packages/click/core.py", line 1688, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/lizhaorui/anaconda3/envs/agent/lib/python3.12/site-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/lizhaorui/anaconda3/envs/agent/lib/python3.12/site-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/lizhaorui/DL/AutoAgent/autoagent/cli.py", line 224, in main
user_mode(model, context_variables, False)
File "/home/lizhaorui/DL/AutoAgent/autoagent/cli.py", line 290, in user_mode
response = client.run(agent, messages, context_variables, debug=debug)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/lizhaorui/DL/AutoAgent/autoagent/core.py", line 429, in run
completion = self.get_chat_completion(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/lizhaorui/DL/AutoAgent/autoagent/core.py", line 202, in get_chat_completion
completion_response = completion(**create_params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/lizhaorui/anaconda3/envs/agent/lib/python3.12/site-packages/litellm/utils.py", line 980, in wrapper
raise e
File "/home/lizhaorui/anaconda3/envs/agent/lib/python3.12/site-packages/litellm/utils.py", line 861, in wrapper
result = original_function(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/lizhaorui/anaconda3/envs/agent/lib/python3.12/site-packages/litellm/main.py", line 2951, in completion
raise exception_type(
^^^^^^^^^^^^^^^
File "/home/lizhaorui/anaconda3/envs/agent/lib/python3.12/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 2141, in exception_type
raise e
File "/home/lizhaorui/anaconda3/envs/agent/lib/python3.12/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 2110, in exception_type
raise APIConnectionError(
litellm.exceptions.APIConnectionError: litellm.APIConnectionError: OllamaException - {"error":"model 'qwen2.5:72b ' not found"}
I have changed these two places as follows:

COMPLETION_MODEL = os.getenv('COMPLETION_MODEL', 'ollama/qwen2.5:72b ')
API_BASE_URL = 'http://127.0.0.1:11434/'
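Note that the Ollama error above quotes the tag as 'qwen2.5:72b ' with a trailing space, which matches the trailing space inside the COMPLETION_MODEL default. Ollama looks up tags verbatim, so it reports the model as not found even if qwen2.5:72b itself has been pulled. Below is a minimal sketch of the corrected settings plus a sanity check through litellm, assuming the model has already been pulled (visible in ollama list) and the server is on the default port; it is an illustration, not the exact AutoAgent config.

import os
import litellm

# No trailing space in the model tag: Ollama matches the tag exactly, so
# 'qwen2.5:72b ' (with a space) names a different, nonexistent model.
COMPLETION_MODEL = os.getenv('COMPLETION_MODEL', 'ollama/qwen2.5:72b')
API_BASE_URL = 'http://127.0.0.1:11434/'

# Sanity check outside AutoAgent: this exercises the same
# litellm.completion -> Ollama path shown in the traceback above.
response = litellm.completion(
    model=COMPLETION_MODEL,
    messages=[{"role": "user", "content": "Say hello."}],
    api_base=API_BASE_URL,
)
print(response.choices[0].message.content)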
Up-voted; Ollama support is needed.
Would love to try this on my local LLM with Ollama; I'm running ipex-llm with PyTorch.