Skip to content

Commit f548d6a

Browse files
committed
Fix issue where the requested number of completion tokens could exceed the model's maximum.
Gets each model's maximum number of tokens before sending the query. This should be consolidated into another file, as it will be shared with Settings in a future release. Close #14.
1 parent 18e1539 commit f548d6a

1 file changed

Lines changed: 15 additions & 1 deletion

File tree

src/agent.py

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -105,6 +105,20 @@ def get_model(self) -> str:
105105
assert self.is_valid_model('text-davinci-003')
106106
return 'text-davinci-003'
107107

108+
def max_token_count(self, model: str) -> int:
    '''Look up the maximum number of tokens the given model can generate.

    Unknown model names fall back to a conservative default of 2,048.
    '''
    # TODO: This should be somewhere else, as it's also shared by Settings.
    token_limits: dict[str, int] = {
        'text-davinci-003': 4_000,
        'text-curie-001': 2_048,
        'text-babbage-001': 2_048,
        'text-ada-001': 2_048,
        'code-davinci-002': 8_000,
        'code-cushman-001': 2_048,
    }
    try:
        return token_limits[model]
    except KeyError:
        return 2_048
121+
108122
def instruction_list(self, function: Union[LowLevelILFunction,
109123
MediumLevelILFunction,
110124
HighLevelILFunction]) -> list[str]:
@@ -136,6 +150,6 @@ def send_query(self, query: str) -> str:
136150
response: str = openai.Completion.create(
137151
model=self.model,
138152
prompt=query,
139-
max_tokens=2_048
153+
max_tokens=self.max_token_count(self.model) - len(query),
140154
)
141155
return response.choices[0].text

0 commit comments

Comments
 (0)