Skip to content

Commit 4388aae

Browse files
committed
feat: All Chinese comments in the code have been translated to English.
1 parent 7a5db83 commit 4388aae

294 files changed

Lines changed: 6628 additions & 6626 deletions

File tree

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

src/backend/bisheng/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
# from bisheng.processing.process import load_flow_from_json # noqa: E402
77

88
try:
9-
# 通过ci去自动修改
9+
# Automatically modified via CI
1010
__version__ = '2.3.0-beta3'
1111
except metadata.PackageNotFoundError:
1212
# Case where package metadata is not available.

src/backend/bisheng/api/services/assistant.py

Lines changed: 51 additions & 51 deletions
Large diffs are not rendered by default.

src/backend/bisheng/api/services/assistant_agent.py

Lines changed: 34 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@
3232

3333

3434
class AssistantAgent(AssistantUtils):
35-
# cohere的模型需要的特殊prompt
35+
# Special prompt required by cohere models
3636
ASSISTANT_PROMPT_COHERE = """{preamble}|<instruct>|Carefully perform the following instructions, in order, starting each with a new line.
3737
Firstly, You may need to use complex and advanced reasoning to complete your task and answer the question. Think about how you can use the provided tools to answer the question and come up with a high level plan you will execute.
3838
Write 'Plan:' followed by an initial high level plan of how you will solve the problem including the tools and steps required.
@@ -47,7 +47,7 @@ class AssistantAgent(AssistantUtils):
4747
4848
Additional instructions to note:
4949
- If the user's question is in Chinese, please answer it in Chinese.
50-
- 当问题中有涉及到时间信息时,比如最近6个月、昨天、去年等,你需要用时间工具查询时间信息。
50+
- When the question involves time information, such as the last 6 months, yesterday, last year, etc., you need to use the time tool to look up the time information.
5151
""" # noqa
5252

5353
def __init__(self, assistant_info: Assistant, chat_id: str, invoke_user_id: int):
@@ -67,7 +67,7 @@ def __init__(self, assistant_info: Assistant, chat_id: str, invoke_user_id: int)
6767
self.current_agent_executor = None
6868
self.llm: BaseLanguageModel | None = None
6969
self.llm_agent_executor = None
70-
# 知识库检索相关参数
70+
# Knowledge Base Retrieval Related Parameters
7171
self.knowledge_retriever = {'max_content': 15000, 'sort_by_source_and_index': False}
7272

7373
async def init_assistant(self, callbacks: Callbacks = None):
@@ -76,7 +76,7 @@ async def init_assistant(self, callbacks: Callbacks = None):
7676
await self.init_agent()
7777

7878
async def init_llm(self):
79-
# 获取配置的助手模型列表
79+
# Get the list of configured assistant models
8080
assistant_llm = await LLMService.get_assistant_llm()
8181
if not assistant_llm.llm_list:
8282
raise AssistantModelEmptyError()
@@ -96,7 +96,7 @@ async def init_llm(self):
9696
'sort_by_source_and_index': default_llm.knowledge_sort_index
9797
}
9898

99-
# 初始化llm
99+
# Initialize the llm
100100
self.llm = await LLMService.get_bisheng_llm(model_id=default_llm.model_id,
101101
temperature=self.assistant.temperature,
102102
streaming=default_llm.streaming,
@@ -106,7 +106,7 @@ async def init_llm(self):
106106
user_id=self.invoke_user_id)
107107

108108
async def init_auto_update_llm(self):
109-
""" 初始化自动优化prompt等信息的llm实例 """
109+
""" Initialize Automatic Optimization prompt and other information.llmInstances """
110110
assistant_llm = await LLMService.get_assistant_llm()
111111
if not assistant_llm.auto_llm:
112112
raise AssistantAutoLLMError()
@@ -120,7 +120,7 @@ async def init_auto_update_llm(self):
120120
user_id=self.invoke_user_id)
121121

122122
async def init_tools(self, callbacks: Callbacks = None):
123-
"""通过名称获取tool 列表
123+
"""Get by nametool Vertical
124124
tools_name_param:: {name: params}
125125
"""
126126
links: List[AssistantLink] = await AssistantLinkDao.get_assistant_link(
@@ -191,38 +191,38 @@ async def init_tools(self, callbacks: Callbacks = None):
191191

192192
async def init_agent(self):
193193
"""
194-
初始化智能体的agent
194+
Initialize the assistant's agent
195195
"""
196-
# 引入agent执行参数
196+
# Load the agent execution parameters
197197
agent_executor_type = self.llm_agent_executor
198198
self.current_agent_executor = agent_executor_type
199-
# 做转换
199+
# Do the Conversion
200200
agent_executor_type = self.agent_executor_dict.get(agent_executor_type,
201201
agent_executor_type)
202202

203203
prompt = self.assistant.prompt
204204
if getattr(self.llm, 'model_name', '').startswith('command-r'):
205205
prompt = self.ASSISTANT_PROMPT_COHERE.format(preamble=prompt)
206206
if self.current_agent_executor == 'ReAct':
207-
# 初始化agent
207+
# Initialize the agent
208208
self.agent = ConfigurableAssistant(agent_executor_type=agent_executor_type,
209209
tools=self.tools,
210210
llm=self.llm,
211211
assistant_message=prompt)
212212
else:
213-
# function-calling模式,也添加递归限制
213+
# function-calling mode; also add a recursion limit
214214
logger.info(f'Creating LangGraph agent with {len(self.tools)} tools, llm type: {type(self.llm)}')
215215
logger.info(f'LLM streaming capability: {getattr(self.llm, "streaming", "unknown")}')
216216

217217
self.agent = create_react_agent(self.llm, self.tools, prompt=prompt, checkpointer=False)
218218
logger.info(f'LangGraph agent created: {type(self.agent)}')
219219

220-
# 为agent添加递归限制配置
220+
# Add a recursion-limit configuration to the agent
221221
self.agent = self.agent.with_config({'recursion_limit': 100})
222222
logger.info(f'Agent config applied: recursion_limit=100')
223223

224224
async def optimize_assistant_prompt(self):
225-
""" 自动优化生成prompt """
225+
""" Automatically optimize generationprompt """
226226
chain = ({
227227
'assistant_name': lambda x: x['assistant_name'],
228228
'assistant_description': lambda x: x['assistant_description'],
@@ -239,16 +239,16 @@ def sync_optimize_assistant_prompt(self):
239239
return optimize_assistant_prompt(self.llm, self.assistant.name, self.assistant.desc)
240240

241241
def generate_guide(self, prompt: str):
242-
""" 生成开场对话和开场问题 """
242+
""" Generate opening dialogue and opening questions """
243243
return generate_opening_dialog(self.llm, prompt)
244244

245245
def generate_description(self, prompt: str):
246-
""" 生成描述对话 """
246+
""" Generate description dialog """
247247
return generate_breif_description(self.llm, prompt)
248248

249249
def choose_tools(self, tool_list: List[Dict[str, str]], prompt: str) -> List[str]:
250250
"""
251-
选择工具
251+
Choose A Tool
252252
tool_list: [{name: xxx, description: xxx}]
253253
"""
254254
tool_list = [
@@ -261,7 +261,7 @@ def choose_tools(self, tool_list: List[Dict[str, str]], prompt: str) -> List[str
261261
async def fake_callback(self, callback: Callbacks):
262262
if not callback:
263263
return
264-
# 假回调,将已下线的技能回调给前端
264+
# Fake callback: report offline skills back to the front-end
265265
for one in self.offline_flows:
266266
run_id = uuid.uuid4()
267267
await callback[0].on_tool_start({
@@ -272,7 +272,7 @@ async def fake_callback(self, callback: Callbacks):
272272
await callback[0].on_tool_end(output='flow is offline', name=one, run_id=run_id)
273273

274274
async def record_chat_history(self, message: List[Any]):
275-
# 记录助手的聊天历史
275+
# Record Assistant Chat History
276276
if not os.getenv('BISHENG_RECORD_HISTORY'):
277277
return
278278
try:
@@ -292,11 +292,11 @@ async def record_chat_history(self, message: List[Any]):
292292
logger.error(f'record assistant history error: {str(e)}')
293293

294294
async def trim_messages(self, messages: List[Any]) -> List[Any]:
295-
# 获取encoding
295+
# Get the encoding
296296
enc = self.cl100k_base()
297297

298298
def get_finally_message(new_messages: List[Any]) -> List[Any]:
299-
# 修剪到只有一条记录则不再处理
299+
# If trimmed down to a single record, stop further processing
300300
if len(new_messages) == 1:
301301
return new_messages
302302
total_count = 0
@@ -319,7 +319,7 @@ def get_finally_message(new_messages: List[Any]) -> List[Any]:
319319

320320
async def run(self, query: str, chat_history: List = None, callback: Callbacks = None) -> List[BaseMessage]:
321321
"""
322-
运行智能体对话
322+
Run Agent Conversation
323323
"""
324324
await self.fake_callback(callback)
325325

@@ -338,14 +338,14 @@ async def run(self, query: str, chat_history: List = None, callback: Callbacks =
338338
result = await self.agent.ainvoke({'messages': inputs}, config=RunnableConfig(callbacks=callback))
339339
result = result['messages']
340340

341-
# 记录聊天历史
341+
# Record Chat History
342342
await self.record_chat_history([one.to_json() for one in result])
343343

344344
return result
345345

346346
async def astream(self, query: str, chat_history: List = None, callback: Callbacks = None):
347347
"""
348-
运行智能体对话 - 流式版本
348+
Run Agent Conversation - Streaming version
349349
"""
350350
await self.fake_callback(callback)
351351

@@ -359,13 +359,13 @@ async def astream(self, query: str, chat_history: List = None, callback: Callbac
359359
inputs = await self.trim_messages(inputs)
360360

361361
if self.current_agent_executor == 'ReAct':
362-
# ReAct模式暂时不支持流式,降级到非流式
362+
# ReAct mode does not support streaming yet; fall back to non-streaming
363363
result = await self.react_run(inputs, callback)
364-
# 记录聊天历史
364+
# Record Chat History
365365
await self.record_chat_history([one.to_json() for one in result])
366366
yield result
367367
else:
368-
# 使用流式调用
368+
# Use Streaming Calls
369369
config = RunnableConfig(callbacks=callback)
370370
final_messages = []
371371

@@ -374,21 +374,21 @@ async def astream(self, query: str, chat_history: List = None, callback: Callbac
374374
chunk_count = 0
375375

376376
try:
377-
# 使用messages模式的LangGraph streaming获得token级别的流式输出
377+
# Use LangGraph streaming with "messages" mode to get token-level streaming output
378378
async for chunk in self.agent.astream({'messages': inputs}, config=config, stream_mode="messages"):
379379
chunk_count += 1
380380

381-
# stream_mode="messages" 返回 (message, metadata) 元组
381+
# stream_mode="messages" Return (message, metadata) Meta Group
382382
message = None
383383
if isinstance(chunk, tuple) and len(chunk) >= 2:
384384
message, metadata = chunk[:2]
385385
elif hasattr(chunk, 'content'):
386-
# 直接是消息对象
386+
# chunk is the message object itself
387387
message = chunk
388388

389389
if message:
390-
# stream_mode="messages"返回的是独立chunk,直接使用其内容
391-
final_messages = [message] # 保存消息用于历史记录
390+
# stream_mode="messages"Returns Independencechunk, use its content directly
391+
final_messages = [message] # Save message for history
392392
yield [message]
393393

394394
except Exception as astream_error:
@@ -400,12 +400,12 @@ async def astream(self, query: str, chat_history: List = None, callback: Callbac
400400
if chunk_count == 0:
401401
logger.warning(f'No chunks received from agent.astream()! This indicates a streaming issue.')
402402

403-
# 记录聊天历史
403+
# Record Chat History
404404
if final_messages:
405405
await self.record_chat_history([one.to_json() for one in final_messages])
406406

407407
async def react_run(self, inputs: List, callback: Callbacks = None):
408-
""" react 模式的输入和执行 """
408+
""" react Mode input and execution """
409409
result = await self.agent.ainvoke({
410410
'input': inputs[-1].content,
411411
'chat_history': inputs[:-1],

src/backend/bisheng/api/services/assistant_base.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55

66

77
class AssistantUtils:
8-
# 忽略助手配置已从系统配置中移除,暂不需要此类的方法
8+
# NOTE: assistant configuration has been removed from the system config; these methods are not needed for now
99

1010
@staticmethod
1111
def cl100k_base() -> TikTokenEncoding:

0 commit comments

Comments
 (0)