ValidationError Traceback (most recent call last)
Cell In[2], line 7
4 print('\n#############################################\n')
6 application.knowledge_service.init_knowledge_base()
----> 7 result2 = application.get_knowledeg_based_answer('冠心病是什么原因引起的?可以吃什么药?')
8 print('\n############################################\n')
9 print('\nresult of knowledge base:\n')
Cell In[1], line 77, in LangChainApplication.get_knowledeg_based_answer(self, query, history_len, temperature, top_p, top_k, chat_history)
74 knowledge_chain.return_source_documents = True
76 # Pass the question into the chain to run the query
---> 77 result = knowledge_chain({"query":query})
78 return result
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain_core/_api/deprecation.py:148, in deprecated.<locals>.deprecate.<locals>.warning_emitting_wrapper(*args, **kwargs)
146 warned = True
147 emit_warning()
--> 148 return wrapped(*args, **kwargs)
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chains/base.py:378, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)
346 """Execute the chain.
347
348 Args:
(...)
369 `Chain.output_keys`.
370 """
371 config = {
372 "callbacks": callbacks,
373 "tags": tags,
374 "metadata": metadata,
375 "run_name": run_name,
376 }
--> 378 return self.invoke(
379 inputs,
380 cast(RunnableConfig, {k: v for k, v in config.items() if v is not None}),
381 return_only_outputs=return_only_outputs,
382 include_run_info=include_run_info,
383 )
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chains/base.py:163, in Chain.invoke(self, input, config, **kwargs)
161 except BaseException as e:
162 run_manager.on_chain_error(e)
--> 163 raise e
164 run_manager.on_chain_end(outputs)
166 if include_run_info:
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chains/base.py:153, in Chain.invoke(self, input, config, **kwargs)
150 try:
151 self._validate_inputs(inputs)
152 outputs = (
--> 153 self._call(inputs, run_manager=run_manager)
154 if new_arg_supported
155 else self._call(inputs)
156 )
158 final_outputs: Dict[str, Any] = self.prep_outputs(
159 inputs, outputs, return_only_outputs
160 )
161 except BaseException as e:
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chains/retrieval_qa/base.py:145, in BaseRetrievalQA._call(self, inputs, run_manager)
143 else:
144 docs = self._get_docs(question) # type: ignore[call-arg]
--> 145 answer = self.combine_documents_chain.run(
146 input_documents=docs, question=question, callbacks=_run_manager.get_child()
147 )
149 if self.return_source_documents:
150 return {self.output_key: answer, "source_documents": docs}
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain_core/_api/deprecation.py:148, in deprecated.<locals>.deprecate.<locals>.warning_emitting_wrapper(*args, **kwargs)
146 warned = True
147 emit_warning()
--> 148 return wrapped(*args, **kwargs)
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chains/base.py:600, in Chain.run(self, callbacks, tags, metadata, *args, **kwargs)
595 return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[
596 _output_key
597 ]
599 if kwargs and not args:
--> 600 return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[
601 _output_key
602 ]
604 if not kwargs and not args:
605 raise ValueError(
606 "`run` supported with either positional arguments or keyword arguments,"
607 " but none were provided."
608 )
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain_core/_api/deprecation.py:148, in deprecated.<locals>.deprecate.<locals>.warning_emitting_wrapper(*args, **kwargs)
146 warned = True
147 emit_warning()
--> 148 return wrapped(*args, **kwargs)
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chains/base.py:378, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)
346 """Execute the chain.
347
348 Args:
(...)
369 `Chain.output_keys`.
370 """
371 config = {
372 "callbacks": callbacks,
373 "tags": tags,
374 "metadata": metadata,
375 "run_name": run_name,
376 }
--> 378 return self.invoke(
379 inputs,
380 cast(RunnableConfig, {k: v for k, v in config.items() if v is not None}),
381 return_only_outputs=return_only_outputs,
382 include_run_info=include_run_info,
383 )
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chains/base.py:163, in Chain.invoke(self, input, config, **kwargs)
161 except BaseException as e:
162 run_manager.on_chain_error(e)
--> 163 raise e
164 run_manager.on_chain_end(outputs)
166 if include_run_info:
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chains/base.py:153, in Chain.invoke(self, input, config, **kwargs)
150 try:
151 self._validate_inputs(inputs)
152 outputs = (
--> 153 self._call(inputs, run_manager=run_manager)
154 if new_arg_supported
155 else self._call(inputs)
156 )
158 final_outputs: Dict[str, Any] = self.prep_outputs(
159 inputs, outputs, return_only_outputs
160 )
161 except BaseException as e:
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chains/combine_documents/base.py:137, in BaseCombineDocumentsChain._call(self, inputs, run_manager)
135 # Other keys are assumed to be needed for LLM prediction
136 other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
--> 137 output, extra_return_dict = self.combine_docs(
138 docs, callbacks=_run_manager.get_child(), **other_keys
139 )
140 extra_return_dict[self.output_key] = output
141 return extra_return_dict
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chains/combine_documents/stuff.py:244, in StuffDocumentsChain.combine_docs(self, docs, callbacks, **kwargs)
242 inputs = self._get_inputs(docs, **kwargs)
243 # Call predict on the LLM.
--> 244 return self.llm_chain.predict(callbacks=callbacks, **inputs), {}
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chains/llm.py:316, in LLMChain.predict(self, callbacks, **kwargs)
301 def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
302 """Format prompt with kwargs and pass to LLM.
303
304 Args:
(...)
314 completion = llm.predict(adjective="funny")
315 """
--> 316 return self(kwargs, callbacks=callbacks)[self.output_key]
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain_core/_api/deprecation.py:148, in deprecated.<locals>.deprecate.<locals>.warning_emitting_wrapper(*args, **kwargs)
146 warned = True
147 emit_warning()
--> 148 return wrapped(*args, **kwargs)
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chains/base.py:378, in Chain.__call__(self, inputs, return_only_outputs, callbacks, tags, metadata, run_name, include_run_info)
346 """Execute the chain.
347
348 Args:
(...)
369 `Chain.output_keys`.
370 """
371 config = {
372 "callbacks": callbacks,
373 "tags": tags,
374 "metadata": metadata,
375 "run_name": run_name,
376 }
--> 378 return self.invoke(
379 inputs,
380 cast(RunnableConfig, {k: v for k, v in config.items() if v is not None}),
381 return_only_outputs=return_only_outputs,
382 include_run_info=include_run_info,
383 )
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chains/base.py:163, in Chain.invoke(self, input, config, **kwargs)
161 except BaseException as e:
162 run_manager.on_chain_error(e)
--> 163 raise e
164 run_manager.on_chain_end(outputs)
166 if include_run_info:
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chains/base.py:153, in Chain.invoke(self, input, config, **kwargs)
150 try:
151 self._validate_inputs(inputs)
152 outputs = (
--> 153 self._call(inputs, run_manager=run_manager)
154 if new_arg_supported
155 else self._call(inputs)
156 )
158 final_outputs: Dict[str, Any] = self.prep_outputs(
159 inputs, outputs, return_only_outputs
160 )
161 except BaseException as e:
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chains/llm.py:126, in LLMChain._call(self, inputs, run_manager)
121 def _call(
122 self,
123 inputs: Dict[str, Any],
124 run_manager: Optional[CallbackManagerForChainRun] = None,
125 ) -> Dict[str, str]:
--> 126 response = self.generate([inputs], run_manager=run_manager)
127 return self.create_outputs(response)[0]
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain/chains/llm.py:138, in LLMChain.generate(self, input_list, run_manager)
136 callbacks = run_manager.get_child() if run_manager else None
137 if isinstance(self.llm, BaseLanguageModel):
--> 138 return self.llm.generate_prompt(
139 prompts,
140 stop,
141 callbacks=callbacks,
142 **self.llm_kwargs,
143 )
144 else:
145 results = self.llm.bind(stop=stop, **self.llm_kwargs).batch(
146 cast(List, prompts), {"callbacks": callbacks}
147 )
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain_core/language_models/llms.py:633, in BaseLLM.generate_prompt(self, prompts, stop, callbacks, **kwargs)
625 def generate_prompt(
626 self,
627 prompts: List[PromptValue],
(...)
630 **kwargs: Any,
631 ) -> LLMResult:
632 prompt_strings = [p.to_string() for p in prompts]
--> 633 return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs)
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain_core/language_models/llms.py:803, in BaseLLM.generate(self, prompts, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
788 if (self.cache is None and get_llm_cache() is None) or self.cache is False:
789 run_managers = [
790 callback_manager.on_llm_start(
791 dumpd(self),
(...)
801 )
802 ]
--> 803 output = self._generate_helper(
804 prompts, stop, run_managers, bool(new_arg_supported), **kwargs
805 )
806 return output
807 if len(missing_prompts) > 0:
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain_core/language_models/llms.py:670, in BaseLLM._generate_helper(self, prompts, stop, run_managers, new_arg_supported, **kwargs)
668 for run_manager in run_managers:
669 run_manager.on_llm_error(e, response=LLMResult(generations=[]))
--> 670 raise e
671 flattened_outputs = output.flatten()
672 for manager, flattened_output in zip(run_managers, flattened_outputs):
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain_core/language_models/llms.py:657, in BaseLLM._generate_helper(self, prompts, stop, run_managers, new_arg_supported, **kwargs)
647 def _generate_helper(
648 self,
649 prompts: List[str],
(...)
653 **kwargs: Any,
654 ) -> LLMResult:
655 try:
656 output = (
--> 657 self._generate(
658 prompts,
659 stop=stop,
660 # TODO: support multiple run managers
661 run_manager=run_managers[0] if run_managers else None,
662 **kwargs,
663 )
664 if new_arg_supported
665 else self._generate(prompts, stop=stop)
666 )
667 except BaseException as e:
668 for run_manager in run_managers:
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/langchain_core/language_models/llms.py:1321, in LLM._generate(self, prompts, stop, run_manager, **kwargs)
1315 for prompt in prompts:
1316 text = (
1317 self._call(prompt, stop=stop, run_manager=run_manager, **kwargs)
1318 if new_arg_supported
1319 else self._call(prompt, stop=stop, **kwargs)
1320 )
-> 1321 generations.append([Generation(text=text)])
1322 return LLMResult(generations=generations)
File /usr/local/miniconda3/envs/langchain/lib/python3.11/site-packages/pydantic/v1/main.py:341, in BaseModel.__init__(__pydantic_self__, **data)
339 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data)
340 if validation_error:
--> 341 raise validation_error
342 try:
343 object_setattr(__pydantic_self__, '__dict__', values)
ValidationError: 1 validation error for Generation
text
str type expected (type=type_error.str)
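
The `ValidationError` at the bottom means the custom LLM's `_call` returned something other than a `str` (for example a dict or `None`), so LangChain's `Generation(text=text)` fails pydantic validation with "str type expected". Below is a minimal sketch of a custom `LLM` subclass whose `_call` always hands back a plain string; the class name, endpoint URL, and response schema are assumptions for illustration, not the application's actual code.

```python
from typing import Any, List, Optional

import requests
from langchain_core.language_models.llms import LLM


class LocalChatLLM(LLM):
    """Hypothetical wrapper around a local HTTP inference service.

    Only the requirement that _call return a str comes from the
    traceback above; everything else here is an assumption.
    """

    endpoint_url: str = "http://localhost:8000/generate"  # assumed local server

    @property
    def _llm_type(self) -> str:
        return "local_chat_llm"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[Any] = None,
        **kwargs: Any,
    ) -> str:
        resp = requests.post(self.endpoint_url, json={"prompt": prompt}, timeout=60)
        data = resp.json()
        # If the backend replies with e.g. {"response": "...", "history": [...]},
        # returning `data` itself (a dict) is exactly what makes
        # Generation(text=text) raise "str type expected".
        text = data.get("response", "") if isinstance(data, dict) else data
        return str(text)
```

With `_call` returning a string, `knowledge_chain({"query": ...})` should complete and return a dict containing the answer plus `source_documents`, since `return_source_documents` is set to `True` in `get_knowledeg_based_answer`.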