Issue with llm.bind_tools()

I am facing two errors with llm.bind_tools() in two different scenarios. Please help me figure out what the issue is with my code.

My code:

from langchain_openai import ChatOpenAI
import os
from langchain_community.tools import ArxivQueryRun
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.utilities import ArxivAPIWrapper
from langchain import hub
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_community.chat_models.sambanova import ChatSambaNovaCloud

os.environ["SAMBANOVA_API_KEY"] = '66ea09****'
os.environ["TAVILY_API_KEY"] = 'tvly-B****'
os.environ["LANGCHAIN_API_KEY"] = 'lsv2_pt_22*****'

llm1 = ChatOpenAI(
    base_url="https://api.sambanova.ai/v1/",
    api_key=os.environ["SAMBANOVA_API_KEY"],
    streaming=False,
    model="Meta-Llama-3.1-70B-Instruct",
    temperature=0.1,
)

llm2 = ChatSambaNovaCloud(
    sambanova_url="https://api.sambanova.ai/v1/chat/completions",
    sambanova_api_key=os.environ["SAMBANOVA_API_KEY"],
    model="Meta-Llama-3.1-70B-Instruct",
    max_tokens=1000,
    temperature=0,
)

arxiv_wrapper = ArxivAPIWrapper(top_k_results=1, doc_content_chars_max=200)
arxiv_tool = ArxivQueryRun(api_wrapper=arxiv_wrapper)
tavily_tool = TavilySearchResults(max_results=5)
tools = [
    tavily_tool,
    arxiv_tool,
]

llm1_with_tools = llm1.bind_tools(tools)
llm1_with_tools.invoke([HumanMessage(content="what is the current weather in california?")])  # Error 1 is with this line

llm2_with_tools = llm2.bind_tools(tools)  # Error 2 is with this line

Error 1:

D:\OneDrive\mydirectory\venv\Lib\site-packages\pydantic\main.py:426: UserWarning: Pydantic serializer warnings:
Expected str but got dict with value {'query': 'Current weather in California'} - serialized value may not be as expected
return self.__pydantic_serializer__.to_python(

ValidationError Traceback (most recent call last)
Cell In[71], line 1
----> 1 llm1_with_tools.invoke([HumanMessage(content="what is the current weather in california?")])

File D:\OneDrive\mydirectory\venv\Lib\site-packages\langchain_core\runnables\base.py:5354, in RunnableBindingBase.invoke(self, input, config, **kwargs)
5348 def invoke(
5349 self,
5350 input: Input,
5351 config: Optional[RunnableConfig] = None,
5352 **kwargs: Optional[Any],
5353 ) -> Output:
--> 5354 return self.bound.invoke(
5355 input,
5356 self._merge_configs(config),
5357 **{**self.kwargs, **kwargs},
5358 )

File D:\OneDrive\Learning Room\GenAI Projects\Sambanova\iChat\venv\Lib\site-packages\langchain_core\language_models\chat_models.py:286, in BaseChatModel.invoke(self, input, config, stop, **kwargs)
275 def invoke(
276 self,
277 input: LanguageModelInput,
(...)
281 **kwargs: Any,
282 ) -> BaseMessage:
283 config = ensure_config(config)
284 return cast(
285 ChatGeneration,
--> 286 self.generate_prompt(
287 [self._convert_input(input)],
288 stop=stop,
289 callbacks=config.get("callbacks"),
290 tags=config.get("tags"),
291 metadata=config.get("metadata"),
292 run_name=config.get("run_name"),
293 run_id=config.pop("run_id", None),
294 **kwargs,
295 ).generations[0][0],
296 ).message

File D:\OneDrive\mydirectory\venv\Lib\site-packages\langchain_core\language_models\chat_models.py:786, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks, **kwargs)
778 def generate_prompt(
779 self,
780 prompts: list[PromptValue],
(...)
783 **kwargs: Any,
784 ) -> LLMResult:
785 prompt_messages = [p.to_messages() for p in prompts]
--> 786 return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)

File D:\OneDrive\mydirectory\venv\Lib\site-packages\langchain_core\language_models\chat_models.py:643, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
641 if run_managers:
642 run_managers[i].on_llm_error(e, response=LLMResult(generations=[]))
--> 643 raise e
644 flattened_outputs = [
645 LLMResult(generations=[res.generations], llm_output=res.llm_output) # type: ignore[list-item]
646 for res in results
647 ]
648 llm_output = self._combine_llm_outputs([res.llm_output for res in results])

File D:\OneDrive\mydirectory\iChat\venv\Lib\site-packages\langchain_core\language_models\chat_models.py:633, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
630 for i, m in enumerate(messages):
631 try:
632 results.append(
--> 633 self._generate_with_cache(
634 m,
635 stop=stop,
636 run_manager=run_managers[i] if run_managers else None,
637 **kwargs,
638 )
639 )
640 except BaseException as e:
641 if run_managers:

File D:\OneDrive\Learning Room\GenAI Projects\Sambanova\iChat\venv\Lib\site-packages\langchain_core\language_models\chat_models.py:851, in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs)
849 else:
850 if inspect.signature(self._generate).parameters.get("run_manager"):
--> 851 result = self._generate(
852 messages, stop=stop, run_manager=run_manager, **kwargs
853 )
854 else:
855 result = self._generate(messages, stop=stop, **kwargs)

File D:\OneDrive\Learning Room\GenAI Projects\Sambanova\iChat\venv\Lib\site-packages\langchain_openai\chat_models\base.py:690, in BaseChatOpenAI._generate(self, messages, stop, run_manager, **kwargs)
688 else:
689 response = self.client.create(**payload)
--> 690 return self._create_chat_result(response, generation_info)

File D:\OneDrive\mydirectory\venv\Lib\site-packages\langchain_openai\chat_models\base.py:727, in BaseChatOpenAI._create_chat_result(self, response, generation_info)
725 token_usage = response_dict.get("usage")
726 for res in response_dict["choices"]:
--> 727 message = _convert_dict_to_message(res["message"])
728 if token_usage and isinstance(message, AIMessage):
729 message.usage_metadata = _create_usage_metadata(token_usage)

File D:\OneDrive\mydirectory\venv\Lib\site-packages\langchain_openai\chat_models\base.py:134, in _convert_dict_to_message(_dict)
132 if audio := _dict.get("audio"):
133 additional_kwargs["audio"] = audio
--> 134 return AIMessage(
135 content=content,
136 additional_kwargs=additional_kwargs,
137 name=name,
138 id=id_,
139 tool_calls=tool_calls,
140 invalid_tool_calls=invalid_tool_calls,
141 )
142 elif role == "system":
143 return SystemMessage(content=_dict.get("content", ""), name=name, id=id_)

File D:\OneDrive\mydirectory\venv\Lib\site-packages\langchain_core\messages\ai.py:179, in AIMessage.__init__(self, content, **kwargs)
170 def __init__(
171 self, content: Union[str, list[Union[str, dict]]], **kwargs: Any
172 ) -> None:
173 """Pass in content as positional arg.
174
175 Args:
176 content: The content of the message.
177 kwargs: Additional arguments to pass to the parent class.
178 """
--> 179 super().__init__(content=content, **kwargs)

File D:\OneDrive\mydirectory\venv\Lib\site-packages\langchain_core\messages\base.py:76, in BaseMessage.__init__(self, content, **kwargs)
67 def __init__(
68 self, content: Union[str, list[Union[str, dict]]], **kwargs: Any
69 ) -> None:
70 """Pass in content as positional arg.
71
72 Args:
73 content: The string contents of the message.
74 kwargs: Additional fields to pass to the
75 """
--> 76 super().__init__(content=content, **kwargs)

File D:\OneDrive\mydirectory\venv\Lib\site-packages\langchain_core\load\serializable.py:125, in Serializable.__init__(self, *args, **kwargs)
123 def __init__(self, *args: Any, **kwargs: Any) -> None:
124 """"""
--> 125 super().__init__(*args, **kwargs)

File D:\OneDrive\mydirectory\venv\Lib\site-packages\pydantic\main.py:214, in BaseModel.__init__(self, **data)
212 # __tracebackhide__ tells pytest and some other tools to omit this function from tracebacks
213 __tracebackhide__ = True
--> 214 validated_self = self.__pydantic_validator__.validate_python(data, self_instance=self)
215 if self is not validated_self:
216 warnings.warn(
217 'A custom validator is returning a value other than self.\n'
218 "Returning anything other than self from a top level model validator isn't supported when validating via __init__.\n"
219 'See the model_validator docs (Validators - Pydantic) for more details.',
220 stacklevel=2,
221 )

ValidationError: 1 validation error for AIMessage
invalid_tool_calls.0.args
Input should be a valid string [type=string_type, input_value={'query': 'Current weather in California'}, input_type=dict]

Error 2:

NotImplementedError Traceback (most recent call last)
Cell In[74], line 1
----> 1 llm2_with_tools = llm2.bind_tools(tools)
2 llm2_with_tools

File D:\OneDrive\mydirectory\venv\Lib\site-packages\langchain_core\language_models\chat_models.py:1115, in BaseChatModel.bind_tools(self, tools, **kwargs)
1108 def bind_tools(
1109 self,
1110 tools: Sequence[
(...)
1113 **kwargs: Any,
1114 ) -> Runnable[LanguageModelInput, BaseMessage]:
--> 1115 raise NotImplementedError

NotImplementedError:


In most cases, tools defined through wrappers like LangChain are not fully compatible across providers.

I recommend wiring these tools up directly, following the SambaNova tool-calling example. It may look harder to implement, but it is less framework-dependent.

You can see the example at the following link:
https://community.sambanova.ai/t/function-calling-and-json-mode-in-sambanova-cloud/540
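
Roughly, the direct approach looks like this (a minimal sketch assuming the OpenAI-compatible endpoint; the get_weather tool and its schema are hypothetical placeholders, not part of the linked example):

import json
import os

from openai import OpenAI

# Talk to the OpenAI-compatible endpoint directly instead of going
# through LangChain's bind_tools().
client = OpenAI(
    base_url="https://api.sambanova.ai/v1/",
    api_key=os.environ["SAMBANOVA_API_KEY"],
)

# Describe the tool as a plain JSON schema rather than a LangChain tool object.
# get_weather is a hypothetical placeholder tool.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather for a location.",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string", "description": "City name"}
                },
                "required": ["location"],
            },
        },
    }
]

response = client.chat.completions.create(
    model="Meta-Llama-3.1-70B-Instruct",
    messages=[{"role": "user", "content": "what is the current weather in california?"}],
    tools=tools,
)

# The model returns the tool name plus a JSON string of arguments;
# you parse the arguments and dispatch to your own function yourself.
for call in response.choices[0].message.tool_calls or []:
    args = json.loads(call.function.arguments)
    print(call.function.name, args)

The upside is that you own both the schema and the dispatch, so nothing depends on LangChain's message conversion; the downside is writing that plumbing yourself.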


@maybinmichael91 welcome to the community, and I am sorry you are having issues. Did the thread suggested by @hello1 help?


@hello1 Thank you for lending a hand.


Thank you @hello1. I will try the recommended approach.

Hi @coby.adams, thank you. I have yet to try the approach.
