Skip to content

Commit 3bb05ac

Browse files
authored
Merge pull request #2557 from kevincheng2/develop
[LLM] update code with paddlenlp
2 parents 30c8cdc + 19ff58d commit 3bb05ac

2 files changed

Lines changed: 3 additions & 3 deletions

File tree

llm/server/server/data/processor.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
from abc import ABC, abstractmethod
1717

1818
from paddlenlp.transformers import Llama3Tokenizer, LlamaTokenizer
19-
from paddlenlp.utils.llm_utils import get_eos_token_id
19+
from paddlenlp.trl.llm_utils import get_eos_token_id
2020
from server.engine.config import Config
2121
from server.utils import data_processor_logger
2222

@@ -282,7 +282,7 @@ def _load_tokenizer(self):
282282
"""
283283
if self.config.use_hf_tokenizer:
284284
from transformers import AutoTokenizer
285-
return AutoTokenizer.from_pretrained(self.config.model_dir, use_fast=False)
285+
return AutoTokenizer.from_pretrained(self.config.model_dir, use_fast=False, vocab_file=os.path.join(self.config.model_dir, "sentencepiece.bpe.model"))
286286
else:
287287
from paddlenlp.transformers import AutoTokenizer
288288
return AutoTokenizer.from_pretrained(self.config.model_dir)

llm/server/server/engine/infer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525
import paddle
2626
import paddle.distributed as dist
2727
import paddle.distributed.fleet as fleet
28-
from paddlenlp.utils.llm_utils import get_rotary_position_embedding
28+
from paddlenlp.trl.llm_utils import get_rotary_position_embedding
2929
from paddlenlp_ops import step_paddle
3030
from server.data.processor import DataProcessor
3131
from server.engine.config import Config

0 commit comments

Comments (0)