Skip to content

Commit 5b6a3bb

Browse files
committed
Sync auto-llm-pr-review.yml from .github repo
1 parent ad46b4d commit 5b6a3bb

1 file changed

Lines changed: 82 additions & 4 deletions

File tree

.github/workflows/auto-llm-pr-review.yml

Lines changed: 82 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ on:
2424
- gemini
2525
- anthropic
2626
llm_model:
27-
description: "Model name (provider-specific)"
27+
description: "Model name (provider-specific, e.g. gpt-5.4, gpt-5.4-pro, gpt-5.3-codex)"
2828
required: false
2929
default: ""
3030
type: string
@@ -233,14 +233,88 @@ jobs:
233233
"- End with a short 'Approve / Request changes' recommendation",
234234
].join("\n");
235235
236+
// Map an OpenAI model name to the reasoning-effort level this workflow
// requests for it.
//
// @param {string|null|undefined} model - Raw model name (may be mixed case
//   or padded with whitespace; null/undefined are tolerated).
// @returns {"xhigh"|"high"|null} Effort to request, or null to let the API
//   use its default (unknown / non-gpt-5 models, empty input).
function getOpenAIReasoningEffort(model) {
  // Normalize so the lookup is case- and whitespace-insensitive.
  const normalized = String(model || "").trim().toLowerCase();
  if (!normalized) return null;

  // Newest flagship / codex models get the maximum effort setting.
  const XHIGH_MODELS = new Set(["gpt-5.4", "gpt-5.4-pro", "gpt-5.3-codex"]);
  if (XHIGH_MODELS.has(normalized)) return "xhigh";

  // Any other gpt-5 family model ("gpt-5", "gpt-5-pro", "gpt-5.1", ...)
  // gets "high". This regex subsumes the original's redundant explicit
  // "gpt-5-pro" branch, which returned the same value.
  if (/^gpt-5(?:[.-]|$)/.test(normalized)) return "high";

  // Anything else: no effort override.
  return null;
}
250+
251+
// Pull the assistant's text out of an OpenAI Responses API payload.
//
// Prefers the convenience `output_text` field when it is a non-blank
// string; otherwise walks the structured `output` array and joins every
// text fragment found inside "message" items with newlines.
//
// @param {object|null|undefined} data - Parsed Responses API JSON body.
// @returns {string} Extracted text, trimmed; "" when none is present.
function extractResponsesText(data) {
  const direct = data?.output_text;
  if (typeof direct === "string" && direct.trim()) {
    return direct.trim();
  }

  // Fallback: gather text fragments from message-type output items,
  // in document order.
  const fragments = (data?.output || [])
    .filter((item) => item?.type === "message")
    .flatMap((item) => item.content || [])
    .filter(
      (piece) =>
        (piece?.type === "output_text" || piece?.type === "text") &&
        typeof piece.text === "string"
    )
    .map((piece) => piece.text);

  return fragments.join("\n").trim();
}
268+
236269
async function callOpenAI({ apiKey, baseUrl, model, messages }) {
237270
if (!apiKey) throw new Error("OPENAI_API_KEY is not set.");
271+
const normalizedModel = String(model || "").trim().toLowerCase();
272+
const reasoningEffort = getOpenAIReasoningEffort(normalizedModel);
273+
274+
if (normalizedModel === "gpt-5.3-codex") {
275+
const url = `${baseUrl.replace(/\/$/, "")}/responses`;
276+
const payload = {
277+
model,
278+
input: messages.map(message => ({
279+
role: message.role,
280+
content: message.content,
281+
})),
282+
max_output_tokens: 2048,
283+
};
284+
285+
if (reasoningEffort) {
286+
payload.reasoning = { effort: reasoningEffort };
287+
}
288+
289+
const resp = await fetch(url, {
290+
method: "POST",
291+
headers: {
292+
"Authorization": `Bearer ${apiKey}`,
293+
"Content-Type": "application/json",
294+
},
295+
body: JSON.stringify(payload),
296+
});
297+
298+
if (!resp.ok) {
299+
const text = await resp.text();
300+
throw new Error(`OpenAI Responses API error (${resp.status}): ${text}`);
301+
}
302+
303+
const data = await resp.json();
304+
const content = extractResponsesText(data);
305+
if (!content) throw new Error("OpenAI Responses API returned no content.");
306+
return { content, reasoningEffort };
307+
}
308+
238309
const url = `${baseUrl.replace(/\/$/, "")}/chat/completions`;
239310
const payload = { model, messages };
240311
241312
const isGpt5ish = /gpt-?5/i.test(model) || /^o\d/i.test(model) || /^o1/i.test(model);
242313
if (isGpt5ish) {
243314
payload.max_completion_tokens = 2048;
315+
if (reasoningEffort) {
316+
payload.reasoning_effort = reasoningEffort;
317+
}
244318
} else {
245319
payload.max_tokens = 2048;
246320
payload.temperature = 0.2;
@@ -263,7 +337,7 @@ jobs:
263337
const data = await resp.json();
264338
const content = data?.choices?.[0]?.message?.content;
265339
if (!content) throw new Error("OpenAI API returned no content.");
266-
return content;
340+
return { content, reasoningEffort };
267341
}
268342
269343
async function callGemini({ apiKey, model, prompt }) {
@@ -330,6 +404,7 @@ jobs:
330404
let chosenModel = modelOverride;
331405
let provider = providerFromEnv;
332406
let chosenLabel = reviewLabel;
407+
let reasoningEffort = "";
333408
334409
if (labelParsed) {
335410
provider = labelParsed.provider;
@@ -338,8 +413,8 @@ jobs:
338413
}
339414
340415
if (provider === "openai") {
341-
chosenModel = chosenModel || "gpt-5";
342-
reviewText = await callOpenAI({
416+
chosenModel = chosenModel || "gpt-5.4";
417+
const openAIResult = await callOpenAI({
343418
apiKey: process.env.OPENAI_API_KEY,
344419
baseUrl: process.env.OPENAI_BASE_URL || "https://api.openai.com/v1",
345420
model: chosenModel,
@@ -348,6 +423,8 @@ jobs:
348423
{ role: "user", content: userPrompt },
349424
],
350425
});
426+
reviewText = openAIResult.content;
427+
reasoningEffort = openAIResult.reasoningEffort || "";
351428
} else if (provider === "gemini") {
352429
chosenModel = chosenModel || "gemini-1.5-pro";
353430
reviewText = await callGemini({
@@ -376,6 +453,7 @@ jobs:
376453
`- Provider: \`${provider}\``,
377454
`- Model: \`${chosenModel}\``,
378455
`- Trigger: \`${chosenLabel}\``,
456+
...(reasoningEffort ? [`- Reasoning effort: \`${reasoningEffort}\``] : []),
379457
`- Generated: \`${now}\``,
380458
``,
381459
];

0 commit comments

Comments
 (0)