Skip to content

Commit 84ab984

Browse files
committed
feat:upgrade extract ollama thinking content
1 parent d38b663 commit 84ab984

3 files changed

Lines changed: 4 additions & 183 deletions

File tree

src/backend/bisheng/core/ai/__init__.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3,12 +3,11 @@
33
from langchain_community.document_compressors import DashScopeRerank
44
from langchain_community.embeddings import DashScopeEmbeddings
55
from langchain_deepseek import ChatDeepSeek
6-
from langchain_ollama import OllamaEmbeddings
6+
from langchain_ollama import OllamaEmbeddings, ChatOllama
77
from langchain_openai import ChatOpenAI, AzureChatOpenAI, OpenAIEmbeddings, AzureOpenAIEmbeddings
88

99
from .asr import OpenAIASRClient, AliyunASRClient, AzureOpenAIASRClient
1010
from .base import BaseASRClient, BaseTTSClient
11-
from .llm.chat_ollama import CustomChatOllamaWithReasoning
1211
from .llm.chat_openai_compatible import ChatOpenAICompatible
1312
from .rerank.common_rerank import CommonRerank
1413
from .rerank.xinference_rerank import XinferenceRerank
@@ -26,7 +25,7 @@
2625
'AliyunTTSClient',
2726
'AzureOpenAITTSClient',
2827

29-
'CustomChatOllamaWithReasoning',
28+
'ChatOllama',
3029
'ChatOpenAI',
3130
'AzureChatOpenAI',
3231
'ChatTongyi',

src/backend/bisheng/core/ai/llm/chat_ollama.py

Lines changed: 0 additions & 177 deletions
This file was deleted.

src/backend/bisheng/llm/domain/llm/llm.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414

1515
from bisheng.common.errcode.server import NoLlmModelConfigError, LlmModelConfigDeletedError, LlmProviderDeletedError, \
1616
LlmModelTypeError, LlmModelOfflineError, InitLlmError
17-
from bisheng.core.ai import CustomChatOllamaWithReasoning, ChatOpenAI, ChatOpenAICompatible, \
17+
from bisheng.core.ai import ChatOllama, ChatOpenAI, ChatOpenAICompatible, \
1818
AzureChatOpenAI, ChatTongyi, ChatZhipuAI, MiniMaxChat, ChatAnthropic, ChatDeepSeek, \
1919
MoonshotChat
2020
from bisheng.llm.domain.const import LLMModelType, LLMServerType
@@ -35,7 +35,6 @@ def _get_user_kwargs(model_config: dict) -> dict:
3535
def _get_ollama_params(params: dict, server_config: dict, model_config: dict) -> dict:
3636
params['base_url'] = server_config.get('base_url', '').rstrip('/')
3737
# some bugs
38-
params['extract_reasoning'] = False
3938
params['stream'] = params.pop('streaming', True)
4039
if params.get('max_tokens'):
4140
params['num_ctx'] = params.pop('max_tokens', None)
@@ -155,7 +154,7 @@ def _get_spark_params(params: dict, server_config: dict, model_config: dict) ->
155154

156155
_llm_node_type: Dict = {
157156
# Open source inference framework
158-
LLMServerType.OLLAMA.value: {'client': CustomChatOllamaWithReasoning, 'params_handler': _get_ollama_params},
157+
LLMServerType.OLLAMA.value: {'client': ChatOllama, 'params_handler': _get_ollama_params},
159158
LLMServerType.XINFERENCE.value: {'client': ChatOpenAI, 'params_handler': _get_xinference_params},
160159
LLMServerType.LLAMACPP.value: {'client': ChatOpenAI, 'params_handler': _get_openai_params},
161160
LLMServerType.VLLM.value: {'client': ChatOpenAICompatible, 'params_handler': _get_openai_params},

0 commit comments

Comments (0)