Commit 64452f7

feat(api): Add MiniMax AI provider support
Add MiniMax as a new API provider with:
- MiniMaxProvider class implementing AIProvider protocol
- OpenAI-compatible API at https://api.minimax.io/v1
- Temperature range clamped to 0.01-1.0 per MiniMax spec
- Support for models: M2.7, M2.7-highspeed, M2.5, M2.5-highspeed, M2-her
- 200k context window, 128k max output tokens
- Integrated with EndpointManager, ProviderType enum, and UI
1 parent 72ff878 commit 64452f7

6 files changed

Lines changed: 259 additions & 3 deletions
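Before the per-file diffs, a minimal orientation sketch of how the new provider might be driven once an API key is configured. The ProviderConfiguration, OpenAIChatRequest, and message initializers below are assumptions inferred from the fields visible in this commit, not confirmed signatures; how the API key ends up on the configuration (Preferences/keychain) is not shown here either.

// Hypothetical usage sketch; initializer shapes are inferred from the diff, not confirmed.
let config = ProviderConfiguration(
    providerId: "minimax",
    providerType: .minimax,
    isEnabled: true,
    baseURL: "https://api.minimax.io/v1",
    models: ["MiniMax-M2.7"]
)
// The provider reads config.apiKey at request time; setting it is out of scope for this commit.

let provider = MiniMaxProvider(config: config)

// Temperatures outside 0.01...1.0 are clamped by the provider before sending.
let request = OpenAIChatRequest(
    model: "MiniMax-M2.7",
    messages: [.init(role: "user", content: "Hello")],  // message type name assumed
    temperature: 1.5,                                    // sent as 1.0 after clamping
    maxTokens: 1024
)

let response = try await provider.processChatCompletion(request)
print(response.choices.first?.message.content ?? "")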


Resources/whats-new.json

Lines changed: 14 additions & 0 deletions
@@ -1,5 +1,19 @@
 {
   "releases": [
+    {
+      "version": "20260329.1",
+      "release_date": "March 29, 2026",
+      "introduction": "This release adds support for MiniMax AI provider, enabling access to MiniMax's powerful language models with extended context windows.",
+      "improvements": [
+        {
+          "id": "minimax-provider",
+          "icon": "m.circle.fill",
+          "title": "MiniMax AI Provider",
+          "description": "SAM now supports MiniMax AI, featuring models with up to 200,000 token context windows and 128,000 token output support. Configure your MiniMax API key in Preferences to get started."
+        }
+      ],
+      "bugfixes": []
+    },
     {
       "version": "20260316.2",
       "release_date": "March 16, 2026",

Sources/APIFramework/AIProvider.swift

Lines changed: 1 addition & 1 deletion
@@ -115,7 +115,7 @@ public struct ResponseNormalizer {
     ) throws -> ServerOpenAIChatResponse {

         switch providerType {
-        case .openai, .localLlama, .localMLX, .gemini, .openrouter:
+        case .openai, .localLlama, .localMLX, .gemini, .openrouter, .minimax:
             /// Already in OpenAI format or uses OpenAI format.
             if let openAIResponse = providerResponse as? ServerOpenAIChatResponse {
                 return openAIResponse

Sources/APIFramework/EndpointManager.swift

Lines changed: 12 additions & 0 deletions
@@ -864,6 +864,15 @@ public class EndpointManager: ObservableObject {
                 models: []
             )

+        case .minimax:
+            return ProviderConfiguration(
+                providerId: "minimax",
+                providerType: .minimax,
+                isEnabled: false,
+                baseURL: "https://api.minimax.io/v1",
+                models: []
+            )
+
         case .openrouter:
             return ProviderConfiguration(
                 providerId: "openrouter",
@@ -919,6 +928,9 @@ public class EndpointManager: ObservableObject {
         case .gemini:
             return GeminiProvider(config: config)

+        case .minimax:
+            return MiniMaxProvider(config: config)
+
         case .openrouter:
             return OpenRouterProvider(config: config)

Sources/APIFramework/ExtendedProviders.swift

Lines changed: 210 additions & 0 deletions
@@ -207,6 +207,216 @@ public class DeepSeekProvider: AIProvider {
     }
 }

+// MARK: - MiniMax Provider
+
+/// Provider for MiniMax AI API.
+/// MiniMax uses OpenAI-compatible API format at https://api.minimax.io/v1
+@MainActor
+public class MiniMaxProvider: AIProvider {
+    public let identifier: String
+    public let config: ProviderConfiguration
+    private let logger = Logger(label: "com.sam.api.minimax")
+
+    public init(config: ProviderConfiguration) {
+        self.identifier = config.providerId
+        self.config = config
+        logger.debug("MiniMax Provider initialized")
+    }
+
+    public func processStreamingChatCompletion(_ request: OpenAIChatRequest) async throws -> AsyncThrowingStream<ServerOpenAIChatStreamChunk, Error> {
+        return AsyncThrowingStream { continuation in
+            Task {
+                do {
+                    /// Check cancellation before HTTP request.
+                    if Task.isCancelled {
+                        self.logger.debug("TASK_CANCELLED: MiniMax request cancelled before start")
+                        continuation.finish()
+                        return
+                    }
+
+                    let response = try await self.processChatCompletion(request)
+                    let chunks = self.convertToStreamChunks(response)
+
+                    for chunk in chunks {
+                        /// Check cancellation in streaming loop.
+                        if Task.isCancelled {
+                            self.logger.debug("TASK_CANCELLED: MiniMax streaming cancelled")
+                            continuation.finish()
+                            return
+                        }
+
+                        continuation.yield(chunk)
+                        try await Task.sleep(nanoseconds: 50_000_000)
+                    }
+
+                    continuation.finish()
+                } catch {
+                    continuation.finish(throwing: error)
+                }
+            }
+        }
+    }
+
+    private func convertToStreamChunks(_ response: ServerOpenAIChatResponse) -> [ServerOpenAIChatStreamChunk] {
+        guard let choice = response.choices.first else { return [] }
+        var chunks: [ServerOpenAIChatStreamChunk] = []
+
+        chunks.append(ServerOpenAIChatStreamChunk(
+            id: response.id,
+            object: "chat.completion.chunk",
+            created: response.created,
+            model: response.model,
+            choices: [OpenAIChatStreamChoice(index: 0, delta: OpenAIChatDelta(role: "assistant", content: nil))]
+        ))
+
+        let words = (choice.message.content ?? "").components(separatedBy: .whitespacesAndNewlines).filter { !$0.isEmpty }
+        for word in words {
+            chunks.append(ServerOpenAIChatStreamChunk(
+                id: response.id,
+                object: "chat.completion.chunk",
+                created: response.created,
+                model: response.model,
+                choices: [OpenAIChatStreamChoice(index: 0, delta: OpenAIChatDelta(content: word + " "))]
+            ))
+        }
+
+        chunks.append(ServerOpenAIChatStreamChunk(
+            id: response.id,
+            object: "chat.completion.chunk",
+            created: response.created,
+            model: response.model,
+            choices: [OpenAIChatStreamChoice(index: 0, delta: OpenAIChatDelta(), finishReason: "stop")]
+        ))
+
+        return chunks
+    }
+
+    public func processChatCompletion(_ request: OpenAIChatRequest) async throws -> ServerOpenAIChatResponse {
+        let requestId = UUID().uuidString
+        logger.debug("Processing chat completion via MiniMax API [req:\(requestId.prefix(8))]")
+
+        guard let apiKey = config.apiKey else {
+            throw ProviderError.authenticationFailed("MiniMax API key not configured")
+        }
+
+        guard let baseURL = config.baseURL ?? ProviderType.minimax.defaultBaseURL else {
+            throw ProviderError.invalidConfiguration("MiniMax base URL not configured")
+        }
+
+        /// MiniMax uses OpenAI-compatible API format.
+        guard let url = URL(string: "\(baseURL)/chat/completions") else {
+            throw ProviderError.invalidConfiguration("Invalid MiniMax base URL: \(baseURL)")
+        }
+
+        var urlRequest = URLRequest(url: url)
+        urlRequest.httpMethod = "POST"
+        urlRequest.setValue("application/json", forHTTPHeaderField: "Content-Type")
+        urlRequest.setValue("Bearer \(apiKey)", forHTTPHeaderField: "Authorization")
+
+        /// MiniMax has specific temperature range: 0.01 to 1.0
+        let temperature = request.temperature ?? config.temperature ?? 0.7
+        let clampedTemperature = min(max(temperature, 0.01), 1.0)
+
+        /// Create request body.
+        let requestBody: [String: Any] = [
+            "model": request.model,
+            "messages": request.messages.map { message in
+                [
+                    "role": message.role,
+                    "content": message.content
+                ]
+            },
+            "max_tokens": request.maxTokens ?? config.maxTokens ?? 8192,
+            "temperature": clampedTemperature,
+            "stream": false
+        ]
+
+        do {
+            urlRequest.httpBody = try JSONSerialization.data(withJSONObject: requestBody)
+        } catch {
+            throw ProviderError.networkError("Failed to serialize request: \(error.localizedDescription)")
+        }
+
+        /// Set timeout (5 minutes minimum for tool-enabled requests).
+        /// Even if config specifies lower timeout, enforce 300s minimum to prevent timeouts.
+        let configuredTimeout = TimeInterval(config.timeoutSeconds ?? 300)
+        urlRequest.timeoutInterval = max(configuredTimeout, 300)
+
+        logger.debug("Sending request to MiniMax API [req:\(requestId.prefix(8))]")
+
+        do {
+            let (data, response) = try await URLSession.shared.data(for: urlRequest)
+
+            guard let httpResponse = response as? HTTPURLResponse else {
+                throw ProviderError.networkError("Invalid response type")
+            }
+
+            logger.debug("MiniMax API response [req:\(requestId.prefix(8))]: \(httpResponse.statusCode)")
+
+            guard 200...299 ~= httpResponse.statusCode else {
+                if let errorData = String(data: data, encoding: .utf8) {
+                    logger.error("MiniMax API error [req:\(requestId.prefix(8))]: \(errorData)")
+                }
+                throw ProviderError.networkError("MiniMax API returned status \(httpResponse.statusCode)")
+            }
+
+            /// Parse response.
+            let minimaxResponse = try JSONDecoder().decode(ServerOpenAIChatResponse.self, from: data)
+            logger.debug("Successfully processed MiniMax response [req:\(requestId.prefix(8))]")
+
+            return minimaxResponse
+
+        } catch let error as ProviderError {
+            throw error
+        } catch {
+            logger.error("MiniMax API request failed [req:\(requestId.prefix(8))]: \(error)")
+            throw ProviderError.networkError("Network error: \(error.localizedDescription)")
+        }
+    }
+
+    public func getAvailableModels() async throws -> ServerOpenAIModelsResponse {
+        let models = config.models.map { modelId in
+            ServerOpenAIModel(
+                id: modelId,
+                object: "model",
+                created: Int(Date().timeIntervalSince1970),
+                ownedBy: "minimax"
+            )
+        }
+
+        return ServerOpenAIModelsResponse(
+            object: "list",
+            data: models
+        )
+    }
+
+    public func supportsModel(_ model: String) -> Bool {
+        return config.models.contains(model) || model.hasPrefix("MiniMax-")
+    }
+
+    public func validateConfiguration() async throws -> Bool {
+        guard let apiKey = config.apiKey, !apiKey.isEmpty else {
+            throw ProviderError.authenticationFailed("MiniMax API key is required")
+        }
+
+        return true
+    }
+
+    // MARK: - Lifecycle
+
+    public func loadModel() async throws -> ModelCapabilities {
+        throw ProviderError.invalidRequest("loadModel() not supported for remote providers")
+    }
+
+    public func getLoadedStatus() async -> Bool {
+        return false
+    }
+
+    public func unload() async {
+        /// No-op for remote providers.
+    }
+}
+
 // MARK: - Supporting Types

 public struct ModelStatus {

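One design note on the provider above: streaming is simulated. processStreamingChatCompletion performs a single non-streaming request ("stream": false) and replays the result as word-level chunks with a 50 ms pause between them, so callers still consume an ordinary async stream. A minimal consumption sketch, reusing the hypothetical provider and request values from the earlier example:

// Sketch only — assumes the `provider` and `request` values from the earlier example.
let stream = try await provider.processStreamingChatCompletion(request)

var text = ""
for try await chunk in stream {
    // Each chunk is in OpenAI chat.completion.chunk format; the deltas carry the words.
    if let piece = chunk.choices.first?.delta.content {
        text += piece
    }
}
print(text)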
Sources/ConfigurationSystem/EndpointConfigurationModels.swift

Lines changed: 14 additions & 1 deletion
@@ -180,6 +180,7 @@ public enum ProviderType: String, CaseIterable, Codable {
     case githubCopilot = "github-copilot"
     case deepseek = "deepseek"
     case gemini = "gemini"
+    case minimax = "minimax"
     case openrouter = "openrouter"
     case localLlama = "local-llama"
     case localMLX = "local-mlx"
@@ -192,6 +193,7 @@ public enum ProviderType: String, CaseIterable, Codable {
         case .githubCopilot: return "GitHub Copilot"
         case .deepseek: return "DeepSeek"
         case .gemini: return "Google Gemini"
+        case .minimax: return "MiniMax"
         case .openrouter: return "OpenRouter"
         case .localLlama: return "Local Models (llama.cpp)"
         case .localMLX: return "Local Models (MLX)"
@@ -211,6 +213,7 @@ public enum ProviderType: String, CaseIterable, Codable {
         case .githubCopilot: return "github_copilot"
         case .deepseek: return "deepseek"
         case .gemini: return "gemini"
+        case .minimax: return "minimax"
         case .openrouter: return "openrouter"
         case .localLlama: return "llama"
         case .localMLX: return "mlx"
@@ -221,7 +224,7 @@ public enum ProviderType: String, CaseIterable, Codable {
     public var requiresApiKey: Bool {
         switch self {
         case .localLlama, .localMLX: return false
-        case .openai, .anthropic, .githubCopilot, .deepseek, .gemini, .openrouter, .custom: return true
+        case .openai, .anthropic, .githubCopilot, .deepseek, .gemini, .minimax, .openrouter, .custom: return true
         }
     }

@@ -232,6 +235,7 @@ public enum ProviderType: String, CaseIterable, Codable {
         case .githubCopilot: return "https://api.githubcopilot.com"
         case .deepseek: return "https://api.deepseek.com/v1"
         case .gemini: return "https://generativelanguage.googleapis.com/v1beta"
+        case .minimax: return "https://api.minimax.io/v1"
         case .openrouter: return "https://openrouter.ai/api/v1"
         case .localLlama, .localMLX, .custom: return nil
         }
@@ -254,6 +258,9 @@ public enum ProviderType: String, CaseIterable, Codable {
         case .gemini:
             return []

+        case .minimax:
+            return ["MiniMax-M2.7", "MiniMax-M2.7-highspeed", "MiniMax-M2.5", "MiniMax-M2.5-highspeed", "M2-her"]
+
         case .openrouter:
             return []

@@ -282,6 +289,9 @@ public enum ProviderType: String, CaseIterable, Codable {
         case .gemini:
             return "g.circle.fill"

+        case .minimax:
+            return "m.circle.fill"
+
         case .openrouter:
             return "arrow.triangle.merge"

@@ -311,6 +321,9 @@ public enum ProviderType: String, CaseIterable, Codable {
         case .gemini:
             return .blue

+        case .minimax:
+            return .mint
+
         case .openrouter:
             return .teal

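With the enum extended this way, the rest of the app can resolve MiniMax defaults generically from ProviderType. A small sketch of the members these hunks confirm (the display name, bundled model list, SF Symbol, and tint color are also extended above, but their property names are not visible in the hunks, so they are left out of the sketch):

// Sketch: resolving defaults from the new enum case.
let providerType: ProviderType = .minimax

print(providerType.rawValue)              // "minimax"
print(providerType.requiresApiKey)        // true
print(providerType.defaultBaseURL ?? "")  // "https://api.minimax.io/v1"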
Sources/UserInterface/EndpointManagementView.swift

Lines changed: 8 additions & 1 deletion
@@ -266,6 +266,9 @@ struct EndpointManagementView: View {
         case .gemini:
             return "Get your API key from aistudio.google.com"

+        case .minimax:
+            return "Get your API key from platform.minimax.io"
+
         case .openrouter:
             return "Get your API key from openrouter.ai/keys"

@@ -415,6 +418,7 @@ struct ProviderRowView: View {
         case .githubCopilot: return "arrow.triangle.branch"
         case .deepseek: return "magnifyingglass"
         case .gemini: return "globe"
+        case .minimax: return "m.circle"
         case .openrouter: return "arrow.triangle.merge"
         case .localLlama: return "laptopcomputer"
         case .localMLX: return "flame"
@@ -869,6 +873,9 @@ struct ProviderConfigurationSheet: View {
         case .gemini:
             return "Create an API key at aistudio.google.com"

+        case .minimax:
+            return "Get your API key from platform.minimax.io"
+
         case .openrouter:
             return "Get your API key from openrouter.ai/keys - provides access to 400+ AI models"

@@ -933,7 +940,7 @@
         /// Add authentication if required.
         if providerType.requiresApiKey && !apiKey.isEmpty {
             switch providerType {
-            case .openai, .deepseek, .custom:
+            case .openai, .deepseek, .minimax, .custom:
                 request.setValue("Bearer \(apiKey)", forHTTPHeaderField: "Authorization")

             case .openrouter:
