Skip to content

Commit 735f609

Browse files
hydropix and claude committed
refactor: deduplicate OpenAI model fetching and remove misleading fallbacks
- Extract shared _fetch_openai_models() helper used by both get_available_openai_models() and test_openai_translation_connection()
- Remove hardcoded OpenAI fallback models (gpt-4o, gpt-3.5-turbo, etc.) that don't make sense for local OpenAI-compatible backends like llama.cpp or vLLM. Return empty list on failure instead, letting the caller handle the error.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
1 parent d1a6d7e commit 735f609

1 file changed

Lines changed: 39 additions & 50 deletions

File tree

benchmark/translator.py

Lines changed: 39 additions & 50 deletions
Original file line number | Diff line number | Diff line change
@@ -384,15 +384,16 @@ async def get_available_openrouter_models(config: BenchmarkConfig, text_only: bo
384384
return [{"id": m, "name": m} for m in OpenRouterProvider.FALLBACK_MODELS]
385385

386386

387-
async def get_available_openai_models(config: BenchmarkConfig) -> list[dict]:
387+
async def _fetch_openai_models(config: BenchmarkConfig) -> list[dict]:
388388
"""
389-
Get list of available models from an OpenAI-compatible endpoint.
389+
Fetch and filter models from an OpenAI-compatible endpoint.
390390
391391
Args:
392392
config: Benchmark configuration
393393
394394
Returns:
395-
List of model dicts with id and name
395+
List of model dicts with id, name, and owned_by fields.
396+
Empty list if the endpoint is unreachable or returns no models.
396397
"""
397398
import httpx
398399

@@ -401,37 +402,42 @@ async def get_available_openai_models(config: BenchmarkConfig) -> list[dict]:
401402
if config.openai.api_key:
402403
headers["Authorization"] = f"Bearer {config.openai.api_key}"
403404

404-
fallback_models = [
405-
{"id": "gpt-4o", "name": "gpt-4o"},
406-
{"id": "gpt-4o-mini", "name": "gpt-4o-mini"},
407-
{"id": "gpt-4-turbo", "name": "gpt-4-turbo"},
408-
{"id": "gpt-4", "name": "gpt-4"},
409-
{"id": "gpt-3.5-turbo", "name": "gpt-3.5-turbo"},
410-
]
405+
async with httpx.AsyncClient(timeout=10) as client:
406+
response = await client.get(f"{base_url}/models", headers=headers)
407+
response.raise_for_status()
411408

412-
try:
413-
async with httpx.AsyncClient(timeout=10) as client:
414-
response = await client.get(f"{base_url}/models", headers=headers)
415-
response.raise_for_status()
409+
data = response.json()
410+
models = []
411+
for model in data.get("data", []):
412+
model_id = model.get("id", "")
413+
if not model_id:
414+
continue
415+
if "embedding" in model_id.lower() or "whisper" in model_id.lower():
416+
continue
417+
models.append({
418+
"id": model_id,
419+
"name": model_id,
420+
"owned_by": model.get("owned_by", "unknown"),
421+
})
416422

417-
data = response.json()
418-
models = []
419-
for model in data.get("data", []):
420-
model_id = model.get("id", "")
421-
if not model_id:
422-
continue
423-
if "embedding" in model_id.lower() or "whisper" in model_id.lower():
424-
continue
425-
models.append({
426-
"id": model_id,
427-
"name": model_id,
428-
"owned_by": model.get("owned_by", "unknown"),
429-
})
430-
431-
models.sort(key=lambda item: item["name"].lower())
432-
return models or fallback_models
423+
models.sort(key=lambda item: item["name"].lower())
424+
return models
425+
426+
427+
async def get_available_openai_models(config: BenchmarkConfig) -> list[dict]:
428+
"""
429+
Get list of available models from an OpenAI-compatible endpoint.
430+
431+
Args:
432+
config: Benchmark configuration
433+
434+
Returns:
435+
List of model dicts with id and name. Empty list on failure.
436+
"""
437+
try:
438+
return await _fetch_openai_models(config)
433439
except Exception:
434-
return fallback_models
440+
return []
435441

436442

437443
async def test_openai_translation_connection(config: BenchmarkConfig) -> tuple[bool, str]:
@@ -446,30 +452,13 @@ async def test_openai_translation_connection(config: BenchmarkConfig) -> tuple[b
446452
"""
447453
import httpx
448454

449-
base_url = config.openai.endpoint.replace("/chat/completions", "").rstrip("/")
450-
headers = {}
451-
if config.openai.api_key:
452-
headers["Authorization"] = f"Bearer {config.openai.api_key}"
453-
454455
try:
455-
async with httpx.AsyncClient(timeout=10) as client:
456-
response = await client.get(f"{base_url}/models", headers=headers)
457-
response.raise_for_status()
458-
data = response.json()
459-
460-
models = []
461-
for model in data.get("data", []):
462-
model_id = model.get("id", "")
463-
if not model_id:
464-
continue
465-
if "embedding" in model_id.lower() or "whisper" in model_id.lower():
466-
continue
467-
models.append(model_id)
456+
models = await _fetch_openai_models(config)
468457

469458
if not models:
470459
return False, "No OpenAI-compatible models available"
471460

472-
model_names = models[:5]
461+
model_names = [m["id"] for m in models[:5]]
473462
return True, (
474463
f"OpenAI-compatible endpoint connected ({config.openai.endpoint}). "
475464
f"Available models: {', '.join(model_names)}..."

0 commit comments

Comments (0)